/* arch/x86/kernel/cpu/perf_event_intel.c */
#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Intel PerfMon, used on Core and later.
 */
static const u64 intel_perfmon_event_map[] =
{
    [PERF_COUNT_HW_CPU_CYCLES]          = 0x003c,
    [PERF_COUNT_HW_INSTRUCTIONS]        = 0x00c0,
    [PERF_COUNT_HW_CACHE_REFERENCES]    = 0x4f2e,
    [PERF_COUNT_HW_CACHE_MISSES]        = 0x412e,
    [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
    [PERF_COUNT_HW_BRANCH_MISSES]       = 0x00c5,
    [PERF_COUNT_HW_BUS_CYCLES]          = 0x013c,
};

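/*
 * The values above are raw EVENTSEL encodings, i.e. (unit_mask << 8) |
 * event_select; 0x4f2e, for example, is event 0x2E with umask 0x4F
 * (LLC references).
 *
 * The constraint tables below use two helper macros:
 *   INTEL_EVENT_CONSTRAINT(event, cntmask): the event may only be scheduled
 *     on the general-purpose counters set in cntmask (bit 0 = PMC0, ...).
 *   FIXED_EVENT_CONSTRAINT(event, idx): the architectural event maps onto
 *     fixed-purpose counter 'idx'.
 */
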
static struct event_constraint intel_core_event_constraints[] =
{
    INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
    INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
    INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
    INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
    INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
    INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
    EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] =
{
    FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
    FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
    /*
     * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
     * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
     * ratio between these counters.
     */
    /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
    INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
    INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
    INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
    INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
    INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
    INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
    INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
    INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
    INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
    INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
    EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] =
{
    FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
    FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
    /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
    INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
    INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
    INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
    INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
    INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
    INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
    INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
    INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
    EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_event_constraints[] =
{
    FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
    FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
    /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
    INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
    INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
    INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
    EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
    FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
    FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
    /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
    EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
    return intel_perfmon_event_map[hw_event];
}

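/*
 * Per-model hw-cache event tables, indexed as [cache][operation][result].
 * A value of 0 means the generic event is not supported on this model,
 * -1 means the combination makes no sense for this cache (e.g. writes to
 * the instruction cache); any other value is the raw event encoding.
 */
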
static __initconst const u64 westmere_hw_cache_event_ids
        [PERF_COUNT_HW_CACHE_MAX]
        [PERF_COUNT_HW_CACHE_OP_MAX]
        [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
    [ C(L1D) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
            [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
            [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
            [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
        },
    },
    [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
            [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = 0x0,
            [ C(RESULT_MISS)   ] = 0x0,
        },
    },
    [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
            [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
            [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS */
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
            [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses */
        },
    },
    [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
            [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
            [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = 0x0,
            [ C(RESULT_MISS)   ] = 0x0,
        },
    },
    [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
            [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
    },
    [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
            [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
    },
};

static __initconst const u64 nehalem_hw_cache_event_ids
        [PERF_COUNT_HW_CACHE_MAX]
        [PERF_COUNT_HW_CACHE_OP_MAX]
        [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
    [ C(L1D) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
            [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
            [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
            [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
        },
    },
    [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
            [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = 0x0,
            [ C(RESULT_MISS)   ] = 0x0,
        },
    },
    [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
            [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
            [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS */
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
            [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses */
        },
    },
    [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
            [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
            [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = 0x0,
            [ C(RESULT_MISS)   ] = 0x0,
        },
    },
    [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
            [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
    },
    [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
            [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
    },
};

static __initconst const u64 core2_hw_cache_event_ids
        [PERF_COUNT_HW_CACHE_MAX]
        [PERF_COUNT_HW_CACHE_OP_MAX]
        [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
    [ C(L1D) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
            [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
            [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
            [ C(RESULT_MISS)   ] = 0,
        },
    },
    [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
            [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = 0,
            [ C(RESULT_MISS)   ] = 0,
        },
    },
    [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
            [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
            [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = 0,
            [ C(RESULT_MISS)   ] = 0,
        },
    },
    [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
            [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
            [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = 0,
            [ C(RESULT_MISS)   ] = 0,
        },
    },
    [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
            [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
    },
    [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
            [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
    },
};

static __initconst const u64 atom_hw_cache_event_ids
        [PERF_COUNT_HW_CACHE_MAX]
        [PERF_COUNT_HW_CACHE_OP_MAX]
        [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
    [ C(L1D) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
            [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
            [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = 0x0,
            [ C(RESULT_MISS)   ] = 0,
        },
    },
    [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
            [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = 0,
            [ C(RESULT_MISS)   ] = 0,
        },
    },
    [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
            [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
            [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = 0,
            [ C(RESULT_MISS)   ] = 0,
        },
    },
    [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
            [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
            [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = 0,
            [ C(RESULT_MISS)   ] = 0,
        },
    },
    [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
            [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
    },
    [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
            [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
            [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
        },
        [ C(OP_WRITE) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
            [ C(RESULT_ACCESS) ] = -1,
            [ C(RESULT_MISS)   ] = -1,
        },
    },
};

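/*
 * Global enable/disable: MSR_CORE_PERF_GLOBAL_CTRL gates all architectural
 * counters at once; BTS, PEBS and LBR have their own enable bits and are
 * handled separately below.
 */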
static void intel_pmu_disable_all(void)
{
    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

    wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

    if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
        intel_pmu_disable_bts();

    intel_pmu_pebs_disable_all();
    intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

    intel_pmu_pebs_enable_all();
    intel_pmu_lbr_enable_all();
    wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

    if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
        struct perf_event *event =
            cpuc->events[X86_PMC_IDX_FIXED_BTS];

        if (WARN_ON_ONCE(!event))
            return;

        intel_pmu_enable_bts(event->hw.config);
    }
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * These chips need to be 'reset' when adding counters by programming
 * the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5
 * either in sequence on the same PMC or on different PMCs.
 */
static void intel_pmu_nhm_enable_all(int added)
{
    if (added) {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int i;

        wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
        wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
        wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);

        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

        for (i = 0; i < 3; i++) {
            struct perf_event *event = cpuc->events[i];

            if (!event)
                continue;

            __x86_pmu_enable_event(&event->hw,
                                   ARCH_PERFMON_EVENTSEL_ENABLE);
        }
    }
    intel_pmu_enable_all(added);
}

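/*
 * MSR_CORE_PERF_GLOBAL_STATUS holds one overflow bit per counter (plus the
 * PEBS/BTS bits); writing the same bits to MSR_CORE_PERF_GLOBAL_OVF_CTRL
 * acknowledges and clears them.
 */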
static inline u64 intel_pmu_get_status(void)
{
    u64 status;

    rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

    return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
    wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

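/*
 * Fixed-purpose counters share a single control MSR
 * (MSR_ARCH_PERFMON_FIXED_CTR_CTRL) with one 4-bit field per counter:
 * clearing the field disables the counter; the enable/PMI bits are set
 * in intel_pmu_enable_fixed() below.
 */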
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
    int idx = hwc->idx - X86_PMC_IDX_FIXED;
    u64 ctrl_val, mask;

    mask = 0xfULL << (idx * 4);

    rdmsrl(hwc->config_base, ctrl_val);
    ctrl_val &= ~mask;
    wrmsrl(hwc->config_base, ctrl_val);
}

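/*
 * Event disable has three cases: the pseudo BTS counter, the fixed-purpose
 * counters (detected via config_base), and the regular general-purpose
 * counters, which go through the common x86 path.
 */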
static void intel_pmu_disable_event(struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;

    if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
        intel_pmu_disable_bts();
        intel_pmu_drain_bts_buffer();
        return;
    }

    if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
        intel_pmu_disable_fixed(hwc);
        return;
    }

    x86_pmu_disable_event(event);

    if (unlikely(event->attr.precise_ip))
        intel_pmu_pebs_disable(event);
}

static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
    int idx = hwc->idx - X86_PMC_IDX_FIXED;
    u64 ctrl_val, bits, mask;

    /*
     * Enable IRQ generation (0x8),
     * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
     * if requested:
     */
    bits = 0x8ULL;
    if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
        bits |= 0x2;
    if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
        bits |= 0x1;

    /*
     * ANY bit is supported in v3 and up
     */
    if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
        bits |= 0x4;

    bits <<= (idx * 4);
    mask = 0xfULL << (idx * 4);

    rdmsrl(hwc->config_base, ctrl_val);
    ctrl_val &= ~mask;
    ctrl_val |= bits;
    wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;

    if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
        if (!__get_cpu_var(cpu_hw_events).enabled)
            return;

        intel_pmu_enable_bts(hwc->config);
        return;
    }

    if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
        intel_pmu_enable_fixed(hwc);
        return;
    }

    if (unlikely(event->attr.precise_ip))
        intel_pmu_pebs_enable(event);

    __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
    x86_perf_event_update(event);
    return x86_perf_event_set_period(event);
}

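/*
 * Last-resort cleanup: clear every eventsel/perfctr and fixed counter and
 * rewind the BTS buffer.  Used from the irq handler below when the overflow
 * loop does not terminate.
 */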
static void intel_pmu_reset(void)
{
    struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
    unsigned long flags;
    int idx;

    if (!x86_pmu.num_counters)
        return;

    local_irq_save(flags);

    printk("clearing PMU state on CPU#%d\n", smp_processor_id());

    for (idx = 0; idx < x86_pmu.num_counters; idx++) {
        checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
        checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
    }
    for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
        checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

    if (ds)
        ds->bts_index = ds->bts_buffer_base;

    local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
    struct perf_sample_data data;
    struct cpu_hw_events *cpuc;
    int bit, loops;
    u64 ack, status;

    perf_sample_data_init(&data, 0);

    cpuc = &__get_cpu_var(cpu_hw_events);

    intel_pmu_disable_all();
    intel_pmu_drain_bts_buffer();
    status = intel_pmu_get_status();
    if (!status) {
        intel_pmu_enable_all(0);
        return 0;
    }

    loops = 0;
again:
    if (++loops > 100) {
        WARN_ONCE(1, "perfevents: irq loop stuck!\n");
        perf_event_print_debug();
        intel_pmu_reset();
        goto done;
    }

    inc_irq_stat(apic_perf_irqs);
    ack = status;

    intel_pmu_lbr_read();

    /*
     * PEBS overflow sets bit 62 in the global status register
     */
    if (__test_and_clear_bit(62, (unsigned long *)&status))
        x86_pmu.drain_pebs(regs);

    for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
        struct perf_event *event = cpuc->events[bit];

        if (!test_bit(bit, cpuc->active_mask))
            continue;

        if (!intel_pmu_save_and_restart(event))
            continue;

        data.period = event->hw.last_period;

        if (perf_event_overflow(event, 1, &data, regs))
            x86_pmu_stop(event);
    }

    intel_pmu_ack_status(ack);

    /*
     * Repeat if there is more work to be done:
     */
    status = intel_pmu_get_status();
    if (status)
        goto again;

done:
    intel_pmu_enable_all(0);
    return 1;
}

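/*
 * A branch-instructions event with a sample period of 1 is steered onto the
 * BTS (Branch Trace Store) pseudo counter instead of a generic PMC.
 */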
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    unsigned int hw_event, bts_event;

    hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
    bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

    if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
        return &bts_constraint;

    return NULL;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
    struct event_constraint *c;

    c = intel_bts_constraints(event);
    if (c)
        return c;

    c = intel_pebs_constraints(event);
    if (c)
        return c;

    return x86_get_event_constraints(cpuc, event);
}

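/*
 * Raw events that request the ANY-thread bit need architectural perfmon v3
 * and, when perf is in paranoid mode, CAP_SYS_ADMIN, since the bit lets one
 * hyper-thread observe counts caused by its sibling.
 */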
static int intel_pmu_hw_config(struct perf_event *event)
{
    int ret = x86_pmu_hw_config(event);

    if (ret)
        return ret;

    if (event->attr.type != PERF_TYPE_RAW)
        return 0;

    if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
        return 0;

    if (x86_pmu.version < 3)
        return -EINVAL;

    if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
        return -EACCES;

    event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

    return 0;
}

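/*
 * core_pmu is used for architectural perfmon version 1 (no global control
 * MSRs), intel_pmu below for version 2 and later; see intel_pmu_init().
 */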
static __initconst const struct x86_pmu core_pmu = {
    .name                   = "core",
    .handle_irq             = x86_pmu_handle_irq,
    .disable_all            = x86_pmu_disable_all,
    .enable_all             = x86_pmu_enable_all,
    .enable                 = x86_pmu_enable_event,
    .disable                = x86_pmu_disable_event,
    .hw_config              = x86_pmu_hw_config,
    .schedule_events        = x86_schedule_events,
    .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
    .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
    .event_map              = intel_pmu_event_map,
    .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
    .apic                   = 1,
    /*
     * Intel PMCs cannot be accessed sanely above 32 bit width,
     * so we install an artificial 1<<31 period regardless of
     * the generic event period:
     */
    .max_period             = (1ULL << 31) - 1,
    .get_event_constraints  = intel_get_event_constraints,
    .event_constraints      = intel_core_event_constraints,
};

static void intel_pmu_cpu_starting(int cpu)
{
    init_debug_store_on_cpu(cpu);
    /*
     * Deal with CPUs that don't clear their LBRs on power-up.
     */
    intel_pmu_lbr_reset();
}

static void intel_pmu_cpu_dying(int cpu)
{
    fini_debug_store_on_cpu(cpu);
}

static __initconst const struct x86_pmu intel_pmu = {
    .name                   = "Intel",
    .handle_irq             = intel_pmu_handle_irq,
    .disable_all            = intel_pmu_disable_all,
    .enable_all             = intel_pmu_enable_all,
    .enable                 = intel_pmu_enable_event,
    .disable                = intel_pmu_disable_event,
    .hw_config              = intel_pmu_hw_config,
    .schedule_events        = x86_schedule_events,
    .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
    .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
    .event_map              = intel_pmu_event_map,
    .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
    .apic                   = 1,
    /*
     * Intel PMCs cannot be accessed sanely above 32 bit width,
     * so we install an artificial 1<<31 period regardless of
     * the generic event period:
     */
    .max_period             = (1ULL << 31) - 1,
    .get_event_constraints  = intel_get_event_constraints,

    .cpu_starting           = intel_pmu_cpu_starting,
    .cpu_dying              = intel_pmu_cpu_dying,
};

static void intel_clovertown_quirks(void)
{
    /*
     * PEBS is unreliable due to:
     *
     *   AJ67  - PEBS may experience CPL leaks
     *   AJ68  - PEBS PMI may be delayed by one event
     *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
     *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
     *
     * AJ67 could be worked around by restricting the OS/USR flags.
     * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
     *
     * AJ106 could possibly be worked around by not allowing LBR
     *       usage from PEBS, including the fixup.
     * AJ68  could possibly be worked around by always programming
     *       a pebs_event_reset[0] value and coping with the lost events.
     *
     * But taken together it might just make sense to not enable PEBS on
     * these chips.
     */
    printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
    x86_pmu.pebs = 0;
    x86_pmu.pebs_constraints = NULL;
}

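/*
 * Boot-time setup: pick core_pmu or intel_pmu based on the CPUID leaf 0xA
 * version, read the counter geometry from CPUID, then install the model
 * specific cache-event tables, LBR setup and constraint tables.
 */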
static __init int intel_pmu_init(void)
{
    union cpuid10_edx edx;
    union cpuid10_eax eax;
    unsigned int unused;
    unsigned int ebx;
    int version;

    if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
        switch (boot_cpu_data.x86) {
        case 0x6:
            return p6_pmu_init();
        case 0xf:
            return p4_pmu_init();
        }
        return -ENODEV;
    }

    /*
     * Check whether the Architectural PerfMon supports
     * Branch Misses Retired hw_event or not.
     */
    cpuid(10, &eax.full, &ebx, &unused, &edx.full);
    if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
        return -ENODEV;

    version = eax.split.version_id;
    if (version < 2)
        x86_pmu = core_pmu;
    else
        x86_pmu = intel_pmu;

    x86_pmu.version = version;
    x86_pmu.num_counters = eax.split.num_counters;
    x86_pmu.cntval_bits = eax.split.bit_width;
    x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;

    /*
     * Quirk: v2 perfmon does not report fixed-purpose events, so
     * assume at least 3 events:
     */
    if (version > 1)
        x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

    /*
     * v2 and above have a perf capabilities MSR
     */
    if (version > 1) {
        u64 capabilities;

        rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
        x86_pmu.intel_cap.capabilities = capabilities;
    }

    intel_ds_init();

    /*
     * Install the hw-cache-events table:
     */
    switch (boot_cpu_data.x86_model) {
    case 14: /* 65 nm core solo/duo, "Yonah" */
        pr_cont("Core events, ");
        break;

    case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
        x86_pmu.quirks = intel_clovertown_quirks;
        /* fall through */
    case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
    case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
    case 29: /* six-core 45 nm xeon "Dunnington" */
        memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));

        intel_pmu_lbr_init_core();

        x86_pmu.event_constraints = intel_core2_event_constraints;
        pr_cont("Core2 events, ");
        break;

    case 26: /* 45 nm nehalem, "Bloomfield" */
    case 30: /* 45 nm nehalem, "Lynnfield" */
    case 46: /* 45 nm nehalem-ex, "Beckton" */
        memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));

        intel_pmu_lbr_init_nhm();

        x86_pmu.event_constraints = intel_nehalem_event_constraints;
        x86_pmu.enable_all = intel_pmu_nhm_enable_all;
        pr_cont("Nehalem events, ");
        break;

    case 28: /* Atom */
        memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));

        intel_pmu_lbr_init_atom();

        x86_pmu.event_constraints = intel_gen_event_constraints;
        pr_cont("Atom events, ");
        break;

    case 37: /* 32 nm nehalem, "Clarkdale" */
    case 44: /* 32 nm nehalem, "Gulftown" */
        memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));

        intel_pmu_lbr_init_nhm();

        x86_pmu.event_constraints = intel_westmere_event_constraints;
        x86_pmu.enable_all = intel_pmu_nhm_enable_all;
        pr_cont("Westmere events, ");
        break;

    default:
        /*
         * default constraints for v2 and up
         */
        x86_pmu.event_constraints = intel_gen_event_constraints;
        pr_cont("generic architected perfmon, ");
    }
    return 0;
}

#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
    return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */