 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/hardirq.h>

#include "perf_event.h"
 * Intel PerfMon, used on Core and later.
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
static struct event_constraint intel_core_event_constraints[] __read_mostly =
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),

static struct event_constraint intel_v1_event_constraints[] __read_mostly =

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
static u64 intel_pmu_event_map(int hw_event)
	return intel_perfmon_event_map[hw_event];
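/*
 * Editorial sketch (not part of the original file): entries in the map above
 * are raw PERFEVTSEL encodings, low byte = event select, next byte = unit
 * mask.  E.g. the generic cycles event resolves as follows:
 *
 *	u64 cfg   = intel_pmu_event_map(PERF_COUNT_HW_CPU_CYCLES); // 0x003c
 *	u8  evsel = cfg & 0xff;		// 0x3c: UnHalted Core Cycles
 *	u8  umask = (cfg >> 8) & 0xff;	// 0x00
 */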
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
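/*
 * Editorial worked example (not in the original source): combining the
 * request-type and response-type groups above, the LL read-miss entry used
 * below is SNB_DMND_READ|SNB_L3_MISS, i.e. demand data reads and LLC
 * prefetch data reads (bits 0 and 7) that were satisfied from local or
 * remote DRAM, or from a non-DRAM source, rather than from the L3.  The
 * resulting 64-bit value is what ends up in MSR_OFFCORE_RSP_0/1.
 */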
static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0,
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS) ] = 0,
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS) ] = 0,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0,
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)

static void intel_pmu_disable_all(void)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
static void intel_pmu_enable_all(int added)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))

		intel_pmu_enable_bts(event->hw.config);
 * Intel Errata AAK100 (model 26)
 * Intel Errata AAP53  (model 30)
 * Intel Errata BD53   (model 44)
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
static void intel_pmu_nhm_workaround(void)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
	struct perf_event *event;

	 * The errata requires the following steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 *
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
			x86_perf_event_update(event);

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
static void intel_pmu_nhm_enable_all(int added)
	intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);

static inline u64 intel_pmu_get_status(void)
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

static inline void intel_pmu_ack_status(u64 ack)
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	wrmsrl(hwc->config_base, ctrl_val);
static void intel_pmu_disable_event(struct perf_event *event)
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);

	 * must be disabled before any actual event
	 * because any event may be combined with LBR
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)

	 * ANY bit is supported in v3 and up
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	wrmsrl(hwc->config_base, ctrl_val);
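/*
 * Editorial worked example (not in the original source): each fixed counter
 * owns one 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL, starting at bit
 * idx*4.  For fixed counter 1 (CPU_CLK_UNHALTED.CORE), counting in ring 0
 * and ring 3 with a PMI on overflow, the routine above merges
 * bits = 0x8 | 0x2 | 0x1 = 0xb into bits 4-7, i.e. mask = 0xf0 and value
 * 0xb0, leaving the other counters' fields untouched.
 */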
static void intel_pmu_enable_event(struct perf_event *event)
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))

		intel_pmu_enable_bts(hwc->config);

	 * must be enabled before any actual event
	 * because any event may be combined with LBR
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_enable(event);

	if (event->attr.exclude_host)
		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
	if (event->attr.exclude_guest)
		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
int intel_pmu_save_and_restart(struct perf_event *event)
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);

static void intel_pmu_reset(void)
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;

	if (!x86_pmu.num_counters)

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
 * This handler is triggered by the local APIC, so the APIC IRQ handling
static int intel_pmu_handle_irq(struct pt_regs *regs)
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;

	cpuc = &__get_cpu_var(cpu_hw_events);

	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler. As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 * This handler doesn't seem to have any issues with the unmasking
	 * so it was left at the top.
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
		intel_pmu_enable_all(0);

	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	 * PEBS overflow sets bit 62 in the global status register
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		x86_pmu.drain_pebs(regs);

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		if (!test_bit(bit, cpuc->active_mask))

		if (!intel_pmu_save_and_restart(event))

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);

	 * Repeat if there is more work to be done:
	status = intel_pmu_get_status();

	intel_pmu_enable_all(0);
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;
static int intel_alt_er(int idx)
	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))

	if (idx == EXTRA_REG_RSP_0)
		return EXTRA_REG_RSP_1;
	if (idx == EXTRA_REG_RSP_1)
		return EXTRA_REG_RSP_0;

static void intel_fixup_er(struct perf_event *event, int idx)
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01b7;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01bb;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
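/*
 * Editorial note (not in the original source): the two hard-coded values
 * above are the OFFCORE_RESPONSE event encodings themselves — 0x01b7 is
 * event 0xb7/umask 0x01 (paired with MSR_OFFCORE_RSP_0) and 0x01bb is
 * event 0xbb/umask 0x01 (paired with MSR_OFFCORE_RSP_1) — so retargeting an
 * event to the alternate extra register also has to rewrite its event code.
 */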
 * manage allocation of shared extra msr for certain events
 *
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct perf_event *event,
				   struct hw_perf_event_extra *reg)
	struct event_constraint *c = &emptyconstraint;
	struct er_account *era;
	unsigned long flags;

	 * reg->alloc can be set due to existing state, so for fake cpuc we
	 * need to ignore this, otherwise we might fail to allocate proper fake
	 * state for this extra reg constraint. Also see the comment below.
	if (reg->alloc && !cpuc->is_fake)
		return NULL; /* call x86_get_event_constraint() */

	era = &cpuc->shared_regs->regs[idx];

	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	raw_spin_lock_irqsave(&era->lock, flags);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		 * If it's a fake cpuc -- as per validate_{group,event}() we
		 * shouldn't touch event state and we can avoid doing so
		 * since both will only call get_event_constraints() once
		 * on each event, this avoids the need for reg->alloc.
		 *
		 * Not doing the ER fixup will only result in era->reg being
		 * wrong, but since we won't actually try and program hardware
		 * this isn't a problem either.
		if (!cpuc->is_fake) {
			if (idx != reg->idx)
				intel_fixup_er(event, idx);

		 * x86_schedule_events() can call get_event_constraints()
		 * multiple times on events in the case of incremental
		 * scheduling(). reg->alloc ensures we only do the ER

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		atomic_inc(&era->ref);

	 * need to call x86_get_event_constraint()
	 * to check if associated event has constraints

	idx = intel_alt_er(idx);
	if (idx != reg->idx) {
		raw_spin_unlock_irqrestore(&era->lock, flags);

	raw_spin_unlock_irqrestore(&era->lock, flags);
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
					struct hw_perf_event_extra *reg)
	struct er_account *era;

	 * Only put constraint if extra reg was actually allocated. Also takes
	 * care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake cpuc we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
	 * either since it'll be thrown out.
	if (!reg->alloc || cpuc->is_fake)

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
	struct event_constraint *c = NULL, *d;
	struct hw_perf_event_extra *xreg, *breg;

	xreg = &event->hw.extra_reg;
	if (xreg->idx != EXTRA_REG_NONE) {
		c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
		if (c == &emptyconstraint)
	breg = &event->hw.branch_reg;
	if (breg->idx != EXTRA_REG_NONE) {
		d = __intel_shared_reg_get_constraints(cpuc, event, breg);
		if (d == &emptyconstraint) {
			__intel_shared_reg_put_constraints(cpuc, xreg);
struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code)

	return &unconstrained;

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
	struct event_constraint *c;

	c = intel_bts_constraints(event);

	c = intel_pebs_constraints(event);

	c = intel_shared_regs_constraints(cpuc, event);

	return x86_get_event_constraints(cpuc, event);
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);

	reg = &event->hw.branch_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
	intel_put_shared_regs_event_constraints(cpuc, event);
static void intel_pebs_aliases_core2(struct perf_event *event)
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retire
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less instructions, which
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
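/*
 * Editorial usage note (not in the original source): the rewrite above is
 * what lets precise cycle sampling ("cycles:pp") work on these cores.  The
 * substituted encoding is roughly equivalent to requesting the raw event
 *
 *	perf record -e cpu/event=0xc0,inv,cmask=16/pp ...
 *
 * i.e. INST_RETIRED.ANY_P with the counter mask inverted, which counts once
 * per cycle and is PEBS capable.
 */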
static void intel_pebs_aliases_snb(struct perf_event *event)
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use UOPS_RETIRED.ALL
		 * (0x01c2), which is a PEBS capable event, to get the same
		 *
		 * UOPS_RETIRED.ALL counts the number of cycles that retire
		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
		 * larger than the maximum number of micro-ops that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less micro-ops, which
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
static int intel_pmu_hw_config(struct perf_event *event)
	int ret = x86_pmu_hw_config(event);

	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
		x86_pmu.pebs_aliases(event);

	if (intel_pmu_needs_lbr_smpl(event)) {
		ret = intel_pmu_setup_lbr_filter(event);

	if (event->attr.type != PERF_TYPE_RAW)

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))

	if (x86_pmu.version < 3)

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
	if (x86_pmu.guest_get_msrs)
		return x86_pmu.guest_get_msrs(nr);
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);

static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;

	 * If PMU counter has PEBS enabled it is not enough to disable counter
	 * on a guest entry since PEBS memory write can overshoot guest entry
	 * and corrupt guest memory. Disabling PEBS solves the problem.
	arr[1].msr = MSR_IA32_PEBS_ENABLE;
	arr[1].host = cpuc->pebs_enabled;
static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];

		arr[idx].msr = x86_pmu_config_addr(idx);
		arr[idx].host = arr[idx].guest = 0;

		if (!test_bit(idx, cpuc->active_mask))

		arr[idx].host = arr[idx].guest =
			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;

		if (event->attr.exclude_host)
			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		else if (event->attr.exclude_guest)
			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;

	*nr = x86_pmu.num_counters;
static void core_pmu_enable_event(struct perf_event *event)
	if (!event->attr.exclude_host)
		x86_pmu_enable_event(event);

static void core_pmu_enable_all(int added)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask) ||
				cpuc->events[idx]->attr.exclude_host)

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
PMU_FORMAT_ATTR(event,	"config:0-7"	);
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(pc,	"config:19"	);
PMU_FORMAT_ATTR(any,	"config:21"	); /* v3 + */
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);

static struct attribute *intel_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
ssize_t intel_event_sysfs_show(char *page, u64 config)
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);

	return x86_event_sysfs_show(page, config, event);
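/*
 * Editorial usage note (not in the original source): the PMU_FORMAT_ATTR()
 * strings above are exported under /sys/bus/event_source/devices/cpu/format/
 * and tell the perf tool how to place named terms into the raw config word.
 * For example, "cpu/event=0x2e,umask=0x4f/" becomes config = 0x4f2e — the
 * same encoding used for PERF_COUNT_HW_CACHE_REFERENCES in the event map
 * near the top of this file.
 */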
static __initconst const struct x86_pmu core_pmu = {
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= core_pmu_enable_all,
	.enable			= core_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
	.guest_get_msrs		= core_guest_get_msrs,
	.format_attrs		= intel_arch_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,
struct intel_shared_regs *allocate_shared_regs(int cpu)
	struct intel_shared_regs *regs;

	regs = kzalloc_node(sizeof(struct intel_shared_regs),
			    GFP_KERNEL, cpu_to_node(cpu));
		 * initialize the locks to keep lockdep happy
		for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);
static int intel_pmu_cpu_prepare(int cpu)
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))

	cpuc->shared_regs = allocate_shared_regs(cpu);
	if (!cpuc->shared_regs)
static void intel_pmu_cpu_starting(int cpu)
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);

	init_debug_store_on_cpu(cpu);
	 * Deal with CPUs that don't clear their LBRs on power-up.
	intel_pmu_lbr_reset();

	cpuc->lbr_sel = NULL;

	if (!cpuc->shared_regs)

	if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
		for_each_cpu(i, topology_thread_cpumask(cpu)) {
			struct intel_shared_regs *pc;

			pc = per_cpu(cpu_hw_events, i).shared_regs;
			if (pc && pc->core_id == core_id) {
				cpuc->kfree_on_online = cpuc->shared_regs;
				cpuc->shared_regs = pc;

		cpuc->shared_regs->core_id = core_id;
		cpuc->shared_regs->refcnt++;

	if (x86_pmu.lbr_sel_map)
		cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
static void intel_pmu_cpu_dying(int cpu)
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	pc = cpuc->shared_regs;
		if (pc->core_id == -1 || --pc->refcnt == 0)
		cpuc->shared_regs = NULL;

	fini_debug_store_on_cpu(cpu);

static void intel_pmu_flush_branch_stack(void)
	 * Intel LBR does not tag entries with the
	 * PID of the current task, then we need to
	 * For now, we simply reset it
	intel_pmu_lbr_reset();
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");

static struct attribute *intel_arch3_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_any.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,

	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
static __initconst const struct x86_pmu intel_pmu = {
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.pebs_aliases		= intel_pebs_aliases_core2,

	.format_attrs		= intel_arch3_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
	.guest_get_msrs		= intel_guest_get_msrs,
	.flush_branch_stack	= intel_pmu_flush_branch_stack,
static __init void intel_clovertown_quirk(void)
	 * PEBS is unreliable due to:
	 * AJ67  - PEBS may experience CPL leaks
	 * AJ68  - PEBS PMI may be delayed by one event
	 * AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 * AJ67  could be worked around by restricting the OS/USR flags.
	 * AJ69  could be worked around by setting PMU_FREEZE_ON_PMI.
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 * But taken together it might just make sense to not enable PEBS on
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs_constraints = NULL;
static int intel_snb_pebs_broken(int cpu)
	u32 rev = UINT_MAX; /* default to broken for unknown models */

	switch (cpu_data(cpu).x86_model) {

	case 45: /* SNB-EP */
		switch (cpu_data(cpu).x86_mask) {
		case 6: rev = 0x618; break;
		case 7: rev = 0x70c; break;

	return (cpu_data(cpu).microcode < rev);
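/*
 * Editorial example (not in the original source): per the table above, a
 * SandyBridge-EP (model 45) part at stepping 7 is treated as PEBS-broken
 * unless its microcode revision is at least 0x70c; unknown models default
 * to "broken" via rev = UINT_MAX.
 */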
static void intel_snb_check_microcode(void)
	int pebs_broken = 0;

	for_each_online_cpu(cpu) {
		if ((pebs_broken = intel_snb_pebs_broken(cpu)))

	if (pebs_broken == x86_pmu.pebs_broken)

	 * Serialized by the microcode lock..
	if (x86_pmu.pebs_broken) {
		pr_info("PEBS enabled due to microcode update\n");
		x86_pmu.pebs_broken = 0;
		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
		x86_pmu.pebs_broken = 1;

static __init void intel_sandybridge_quirk(void)
	x86_pmu.check_microcode = intel_snb_check_microcode;
	intel_snb_check_microcode();
static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES,		"cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS,		"instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES,		"bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES,	"cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES,		"cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS,	"branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES,		"branch misses" },
static __init void intel_arch_events_quirk(void)
	/* disable events that are reported as not present by cpuid */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: \'%s\' unavailable\n",
			intel_arch_events_map[bit].name);
static __init void intel_nehalem_quirk(void)
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		 * Erratum AAJ80 detected, we work it around by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
__init int intel_pmu_init(void)
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	struct event_constraint *c;
	unsigned int unused;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
			return p6_pmu_init();
			return knc_pmu_init();
			return p4_pmu_init();

	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)

	version = eax.split.version_id;

	x86_pmu = intel_pmu;

	x86_pmu.version = version;
	x86_pmu.num_counters = eax.split.num_counters;
	x86_pmu.cntval_bits = eax.split.bit_width;
	x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl = ebx.full;
	x86_pmu.events_mask_len = eax.split.mask_length;

	x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);

	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	 * v2 and above have a perf capabilities MSR
	rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
	x86_pmu.intel_cap.capabilities = capabilities;
	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */

	 * Install the hw-cache-events table:
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_add_quirk(intel_clovertown_quirk);
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
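		/*
		 * Editorial note (not in the original source): X86_CONFIG()
		 * packs the named fields into a raw PERFEVTSEL value, so the
		 * frontend-stall alias above corresponds to roughly
		 * event=0x0e, umask=0x01, inv=1, cmask=1, i.e. raw config
		 * 0x180010e (umask at bits 8-15, inv at bit 23, cmask at
		 * bits 24-31).
		 */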
		x86_add_quirk(intel_nehalem_quirk);

		pr_cont("Nehalem events, ");

	case 38: /* Lincroft */
	case 39: /* Penwell */
	case 53: /* Cloverview */
	case 54: /* Cedarview */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
	case 47: /* 32 nm Xeon E7 */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		pr_cont("Westmere events, ");
	case 42: /* SandyBridge */
	case 45: /* SandyBridge, "Romley-EP" */
		x86_add_quirk(intel_sandybridge_quirk);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("SandyBridge events, ");
	case 58: /* IvyBridge */
	case 62: /* IvyBridge EP */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("IvyBridge events, ");
	switch (x86_pmu.version) {
		x86_pmu.event_constraints = intel_v1_event_constraints;
		pr_cont("generic architected perfmon v1, ");
		 * default constraints for v2 and up
		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("generic architected perfmon, ");

	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;

	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;

	if (x86_pmu.event_constraints) {
		 * event on fixed counter2 (REF_CYCLES) only works on this
		 * counter, so do not extend mask to generic counters
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != X86_RAW_EVENT_MASK
			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {

			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			c->weight += x86_pmu.num_counters;