/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/apic.h>

#include "perf_event.h"
/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
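/*
 * Illustrative sketch (not part of the driver): the low byte of each
 * encoding above is the event code and the next byte the umask, as
 * consumed by intel_pmu_event_map() below. E.g. for retired instructions:
 *
 *	u64 config = intel_perfmon_event_map[PERF_COUNT_HW_INSTRUCTIONS];
 *	// config == 0x00c0: event 0xc0 (INST_RETIRED.ANY_P), umask 0x00
 */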
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
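/*
 * Illustrative note (sketch, not part of the driver): the second argument
 * of INTEL_EVENT_CONSTRAINT() is a counter bitmask, so e.g.
 *
 *	INTEL_EVENT_CONSTRAINT(0x12, 0x2)	// MUL: generic counter 1 only
 *
 * while FIXED_EVENT_CONSTRAINT(0x00c0, 0) pins INST_RETIRED.ANY to fixed
 * counter 0.
 */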
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	/*
	 * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
	 * siblings; disable these events because they can corrupt unrelated
	 * counts.
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};
static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};
EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_snb,	"event=0xcd,umask=0x2");

struct attribute *nhm_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

struct attribute *snb_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};
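/*
 * Illustrative usage (assumed perf-tool syntax, not part of the driver):
 * the EVENT_ATTR_STR() strings above surface as named sysfs events, e.g.
 *
 *	perf record -e cpu/mem-loads/pp -- <workload>
 */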
static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
	EVENT_CONSTRAINT_END
};
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
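/*
 * Illustrative sketch (not part of the driver): a raw OFFCORE_RESPONSE
 * value is built by OR-ing request-type bits with response/snoop bits,
 * e.g. for demand reads that miss the LLC:
 *
 *	u64 rsp = SNB_DMND_READ | SNB_L3_MISS;
 *	// this value reaches MSR_OFFCORE_RSP_0/1 via the extra_reg tables
 */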
static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
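/*
 * Illustrative sketch (not part of the driver): generic cache events are
 * looked up by (cache, op, result). For PERF_TYPE_HW_CACHE the config
 *
 *	C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)
 *
 * resolves in this table to 0x0151, i.e. event 0x51 umask 0x01
 * (L1D.REPLACEMENT) on Sandy Bridge.
 */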
/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliable.
 */

#define HSW_DEMAND_DATA_RD		BIT_ULL(0)
#define HSW_DEMAND_RFO			BIT_ULL(1)
#define HSW_ANY_RESPONSE		BIT_ULL(16)
#define HSW_SUPPLIER_NONE		BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM		BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0		BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1		BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P	BIT_ULL(29)
#define HSW_L3_MISS			(HSW_L3_MISS_LOCAL_DRAM| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE			BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define HSW_SNOOP_MISS			BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define HSW_SNOOP_HITM			BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM		BIT_ULL(37)
#define HSW_ANY_SNOOP			(HSW_SNOOP_NONE| \
					 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
					 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
					 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM			(HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ			HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE		HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE		(HSW_L3_MISS_REMOTE_HOP0|\
					 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS			HSW_ANY_RESPONSE
static __initconst const u64 hsw_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x151,  /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x108,  /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x149,  /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x185,  /* ITLB_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};
static __initconst const u64 hsw_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};
static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};
static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1),
	EVENT_EXTRA_END
};
#define SLM_DMND_READ		SNB_DMND_DATA_RD
#define SLM_DMND_WRITE		SNB_DMND_RFO
#define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS		SNB_RESP_ANY
#define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)
static __initconst const u64 slm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_READ|SLM_LLC_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
	},
 },
};
static __initconst const u64 slm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0x0104, /* LD_DCU_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0x0804, /* LD_DTLB_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}
static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}
/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The errata requires the following steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					       ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}
static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}
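/*
 * Illustrative sketch (not part of the driver): the fixed counter control
 * MSR packs one 4-bit control field per fixed counter, which is why the
 * code above masks 0xf shifted by idx * 4. For fixed counter 1
 * (CPU_CLK_UNHALTED.CORE):
 *
 *	u64 mask = 0xfULL << (1 * 4);	// bits 4-7 control fixed counter 1
 */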
static inline bool event_is_checkpointed(struct perf_event *event)
{
	return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
}
static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
	cpuc->intel_cp_status &= ~(1ull << hwc->idx);

	/*
	 * must disable before any actual event
	 * because any event may be combined with LBR
	 */
	if (needs_branch_stack(event))
		intel_pmu_lbr_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}
	/*
	 * must be enabled before any actual event
	 * because any event may be combined with LBR
	 */
	if (needs_branch_stack(event))
		intel_pmu_lbr_enable(event);

	if (event->attr.exclude_host)
		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
	if (event->attr.exclude_guest)
		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

	if (unlikely(event_is_checkpointed(event)))
		cpuc->intel_cp_status |= (1ull << hwc->idx);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	/*
	 * For a checkpointed counter always reset back to 0. This
	 * avoids a situation where the counter overflows, aborts the
	 * transaction and is then set back to shortly before the
	 * overflow, and overflows and aborts again.
	 */
	if (unlikely(event_is_checkpointed(event))) {
		/* No race with NMIs because the counter should not be armed */
		wrmsrl(event->hw.event_base, 0);
		local64_set(&event->hw.prev_count, 0);
	}
	return x86_perf_event_set_period(event);
}
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * No known reason to not always do late ACK,
	 * but just in case do it opt-in.
	 */
	if (!x86_pmu.late_ack)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status)
		goto done;

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		static bool warned = false;
		if (!warned) {
			WARN(1, "perfevents: irq loop stuck!\n");
			perf_event_print_debug();
			warned = true;
		}
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * CondChgd bit 63 doesn't mean any overflow status. Ignore
	 * and clear the bit.
	 */
	if (__test_and_clear_bit(63, (unsigned long *)&status)) {
		if (!status)
			goto done;
	}

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	/*
	 * Checkpointed counters can lead to 'spurious' PMIs because the
	 * rollback caused by the PMI will have cleared the overflow status
	 * bit. Therefore always force probe these counters.
	 */
	status |= cpuc->intel_cp_status;

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	/*
	 * Only unmask the NMI after the overflow counters
	 * have been reset. This avoids spurious NMIs on
	 * Haswell CPUs.
	 */
	if (x86_pmu.late_ack)
		apic_write(APIC_LVTPC, APIC_DM_NMI);

	return handled;
}
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}
static int intel_alt_er(int idx)
{
	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
		return idx;

	if (idx == EXTRA_REG_RSP_0)
		return EXTRA_REG_RSP_1;

	if (idx == EXTRA_REG_RSP_1)
		return EXTRA_REG_RSP_0;

	return idx;
}
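/*
 * Illustrative note (sketch, not part of the driver): intel_alt_er() lets
 * two offcore-response events with different config1 values coexist by
 * steering the second one to the sibling MSR, e.g.
 *
 *	idx = intel_alt_er(EXTRA_REG_RSP_0);	// -> EXTRA_REG_RSP_1 when
 *						//    ERF_HAS_RSP_1 is set
 */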
static void intel_fixup_er(struct perf_event *event, int idx)
{
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}
/*
 * manage allocation of shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct perf_event *event,
				   struct hw_perf_event_extra *reg)
{
	struct event_constraint *c = &emptyconstraint;
	struct er_account *era;
	unsigned long flags;
	int idx = reg->idx;

	/*
	 * reg->alloc can be set due to existing state, so for fake cpuc we
	 * need to ignore this, otherwise we might fail to allocate proper fake
	 * state for this extra reg constraint. Also see the comment below.
	 */
	if (reg->alloc && !cpuc->is_fake)
		return NULL; /* call x86_get_event_constraint() */

again:
	era = &cpuc->shared_regs->regs[idx];
	/*
	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	 */
	raw_spin_lock_irqsave(&era->lock, flags);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		/*
		 * If it's a fake cpuc -- as per validate_{group,event}() we
		 * shouldn't touch event state and we can avoid doing so
		 * since both will only call get_event_constraints() once
		 * on each event, this avoids the need for reg->alloc.
		 *
		 * Not doing the ER fixup will only result in era->reg being
		 * wrong, but since we won't actually try and program hardware
		 * this isn't a problem either.
		 */
		if (!cpuc->is_fake) {
			if (idx != reg->idx)
				intel_fixup_er(event, idx);

			/*
			 * x86_schedule_events() can call get_event_constraints()
			 * multiple times on events in the case of incremental
			 * scheduling. reg->alloc ensures we only do the ER
			 * allocation once.
			 */
			reg->alloc = 1;
		}

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		/* one more user */
		atomic_inc(&era->ref);

		/*
		 * need to call x86_get_event_constraint()
		 * to check if associated event has constraints
		 */
		c = NULL;
	} else {
		idx = intel_alt_er(idx);
		if (idx != reg->idx) {
			raw_spin_unlock_irqrestore(&era->lock, flags);
			goto again;
		}
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return c;
}
static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
				   struct hw_perf_event_extra *reg)
{
	struct er_account *era;

	/*
	 * Only put constraint if extra reg was actually allocated. Also takes
	 * care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake cpuc we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
	 * either since it'll be thrown out.
	 */
	if (!reg->alloc || cpuc->is_fake)
		return;

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
	reg->alloc = 0;
}
static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	struct event_constraint *c = NULL, *d;
	struct hw_perf_event_extra *xreg, *breg;

	xreg = &event->hw.extra_reg;
	if (xreg->idx != EXTRA_REG_NONE) {
		c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
		if (c == &emptyconstraint)
			return c;
	}
	breg = &event->hw.branch_reg;
	if (breg->idx != EXTRA_REG_NONE) {
		d = __intel_shared_reg_get_constraints(cpuc, event, breg);
		if (d == &emptyconstraint) {
			__intel_shared_reg_put_constraints(cpuc, xreg);
			c = d;
		}
	}
	return c;
}
struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	return &unconstrained;
}
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	c = intel_shared_regs_constraints(cpuc, event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}
static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);

	reg = &event->hw.branch_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);
}

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	intel_put_shared_regs_event_constraints(cpuc, event);
}
static void intel_pebs_aliases_core2(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retire
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or fewer instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
static void intel_pebs_aliases_snb(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use UOPS_RETIRED.ALL
		 * (0x01c2), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * UOPS_RETIRED.ALL counts the number of cycles that retire
		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
		 * larger than the maximum number of micro-ops that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or fewer micro-ops, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
		x86_pmu.pebs_aliases(event);

	if (needs_branch_stack(event)) {
		ret = intel_pmu_setup_lbr_filter(event);
		if (ret)
			return ret;
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	if (x86_pmu.guest_get_msrs)
		return x86_pmu.guest_get_msrs(nr);
	*nr = 0;
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
	/*
	 * If PMU counter has PEBS enabled it is not enough to disable counter
	 * on a guest entry since PEBS memory write can overshoot guest entry
	 * and corrupt guest memory. Disabling PEBS solves the problem.
	 */
	arr[1].msr = MSR_IA32_PEBS_ENABLE;
	arr[1].host = cpuc->pebs_enabled;
	arr[1].guest = 0;

	*nr = 2;
	return arr;
}
static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];

		arr[idx].msr = x86_pmu_config_addr(idx);
		arr[idx].host = arr[idx].guest = 0;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		arr[idx].host = arr[idx].guest =
			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;

		if (event->attr.exclude_host)
			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		else if (event->attr.exclude_guest)
			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	}

	*nr = x86_pmu.num_counters;
	return arr;
}
static void core_pmu_enable_event(struct perf_event *event)
{
	if (!event->attr.exclude_host)
		x86_pmu_enable_event(event);
}
static void core_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask) ||
				cpuc->events[idx]->attr.exclude_host)
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}
static int hsw_hw_config(struct perf_event *event)
{
	int ret = intel_pmu_hw_config(event);

	if (ret)
		return ret;
	if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
		return 0;
	event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);

	/*
	 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
	 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
	 * this combination.
	 */
	if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
	    ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
	     event->attr.precise_ip > 0))
		return -EOPNOTSUPP;

	if (event_is_checkpointed(event)) {
		/*
		 * Sampling of checkpointed events can cause situations where
		 * the CPU constantly aborts because of an overflow, which is
		 * then checkpointed back and ignored. Forbid checkpointing
		 * for sampling.
		 *
		 * But still allow a long sampling period, so that perf stat
		 * from KVM works.
		 */
		if (event->attr.sample_period > 0 &&
		    event->attr.sample_period < 0x7fffffff)
			return -EOPNOTSUPP;
	}

	return 0;
}
static struct event_constraint counter2_constraint =
			EVENT_CONSTRAINT(0, 0x4, 0);

static struct event_constraint *
hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c = intel_get_event_constraints(cpuc, event);

	/* Handle special quirk on in_tx_checkpointed only in counter 2 */
	if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
		if (c->idxmsk64 & (1U << 2))
			return &counter2_constraint;
		return &emptyconstraint;
	}

	return c;
}
PMU_FORMAT_ATTR(event,	"config:0-7"	);
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(pc,	"config:19"	);
PMU_FORMAT_ATTR(any,	"config:21"	); /* v3 + */
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);
PMU_FORMAT_ATTR(in_tx,	"config:32");
PMU_FORMAT_ATTR(in_tx_cp, "config:33");

static struct attribute *intel_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};
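/*
 * Illustrative usage (not part of the driver): these format strings are
 * exported under /sys/bus/event_source/devices/cpu/format/ and let tools
 * assemble a raw config by field name, e.g.
 *
 *	perf stat -e cpu/event=0x3c,umask=0x0/ -- <workload>	// cycles
 */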
ssize_t intel_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);

	return x86_event_sysfs_show(page, config, event);
}
static __initconst const struct x86_pmu core_pmu = {
	.name			= "core",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= core_pmu_enable_all,
	.enable			= core_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
	.guest_get_msrs		= core_guest_get_msrs,
	.format_attrs		= intel_arch_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,
};
struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	struct intel_shared_regs *regs;
	int i;

	regs = kzalloc_node(sizeof(struct intel_shared_regs),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (regs) {
		/*
		 * initialize the locks to keep lockdep happy
		 */
		for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);

		regs->core_id = -1;
	}
	return regs;
}
static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
		return NOTIFY_OK;

	cpuc->shared_regs = allocate_shared_regs(cpu);
	if (!cpuc->shared_regs)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}
static void intel_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;

	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();

	cpuc->lbr_sel = NULL;

	if (!cpuc->shared_regs)
		return;

	if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
		for_each_cpu(i, topology_thread_cpumask(cpu)) {
			struct intel_shared_regs *pc;

			pc = per_cpu(cpu_hw_events, i).shared_regs;
			if (pc && pc->core_id == core_id) {
				cpuc->kfree_on_online = cpuc->shared_regs;
				cpuc->shared_regs = pc;
				break;
			}
		}
	}

	cpuc->shared_regs->core_id = core_id;
	cpuc->shared_regs->refcnt++;

	if (x86_pmu.lbr_sel_map)
		cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
}
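
/*
 * Sharing works as follows: every sibling gets its own intel_shared_regs
 * from intel_pmu_cpu_prepare(); the second HT sibling of a core to come
 * online finds its partner's structure via the thread mask, adopts it
 * (bumping refcnt) and parks its own, now redundant, allocation on
 * kfree_on_online to be freed once the CPU is fully online.
 */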
static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	pc = cpuc->shared_regs;
	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->shared_regs = NULL;
	}

	fini_debug_store_on_cpu(cpu);
}
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");

PMU_FORMAT_ATTR(ldlat, "config1:0-15");
static struct attribute *intel_arch3_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_any.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	&format_attr_in_tx.attr,
	&format_attr_in_tx_cp.attr,

	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
	&format_attr_ldlat.attr, /* PEBS load latency */
	NULL,
};
static __initconst const struct x86_pmu intel_pmu = {
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32-bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.pebs_aliases		= intel_pebs_aliases_core2,

	.format_attrs		= intel_arch3_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
	.guest_get_msrs		= intel_guest_get_msrs,
	.sched_task		= intel_pmu_lbr_sched_task,
};
static __init void intel_clovertown_quirk(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67 could be worked around by restricting the OS/USR flags.
	 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips:
	 */
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}
static int intel_snb_pebs_broken(int cpu)
{
	u32 rev = UINT_MAX; /* default to broken for unknown models */

	switch (cpu_data(cpu).x86_model) {
	case 45: /* SNB-EP */
		switch (cpu_data(cpu).x86_mask) {
		case 6: rev = 0x618; break;
		case 7: rev = 0x70c; break;
		}
	}

	return (cpu_data(cpu).microcode < rev);
}
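
/*
 * I.e. PEBS is considered broken until the per-stepping fixed microcode
 * revision is reached; unknown models and steppings compare against
 * UINT_MAX and are therefore treated as broken.
 */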
static void intel_snb_check_microcode(void)
{
	int pebs_broken = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if ((pebs_broken = intel_snb_pebs_broken(cpu)))
			break;
	}
	put_online_cpus();

	if (pebs_broken == x86_pmu.pebs_broken)
		return;

	/*
	 * Serialized by the microcode lock.
	 */
	if (x86_pmu.pebs_broken) {
		pr_info("PEBS enabled due to microcode update\n");
		x86_pmu.pebs_broken = 0;
	} else {
		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
		x86_pmu.pebs_broken = 1;
	}
}
/*
 * Under certain circumstances, accessing certain MSRs may cause a #GP
 * fault. This function tests whether the given MSR can be safely
 * accessed.
 */
static bool check_msr(unsigned long msr, u64 mask)
{
	u64 val_old, val_new, val_tmp;

	/*
	 * Read the current value, change it and read it back to see if it
	 * matches, this is needed to detect certain hardware emulators
	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
	 */
	if (rdmsrl_safe(msr, &val_old))
		return false;

	/*
	 * Only change the bits which can be updated by wrmsrl.
	 */
	val_tmp = val_old ^ mask;
	if (wrmsrl_safe(msr, val_tmp) ||
	    rdmsrl_safe(msr, &val_new))
		return false;

	if (val_new != val_tmp)
		return false;

	/*
	 * Here it's sure that the MSR can be safely accessed.
	 * Restore the old value and return.
	 */
	wrmsrl(msr, val_old);
	return true;
}
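
/*
 * Typical use, as in intel_pmu_init() below, probes an MSR with a small
 * mask of known-writable bits:
 *
 *	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
 *		x86_pmu.lbr_nr = 0;
 *
 * An emulator that silently ignores the write (and keeps reading back
 * 0) fails the read-modify-read test and the feature is disabled.
 */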
static __init void intel_sandybridge_quirk(void)
{
	x86_pmu.check_microcode = intel_snb_check_microcode;
	intel_snb_check_microcode();
}
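
/*
 * Hooking check_microcode means the pebs_broken decision is re-evaluated
 * whenever microcode is (re)loaded at runtime, not only at boot.
 */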
static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
};
static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events that cpuid reported as not present */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: \'%s\' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}
static __init void intel_nehalem_quirk(void)
{
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected, we work it around by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
	}
}
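
/*
 * 0x7f89 decodes, per the format attributes above, as event=0x89 with
 * umask=0x7f, i.e. the BR_MISP_EXEC.ANY encoding mentioned in the
 * comment.
 */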
EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82");
/* Haswell special events */
EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
static struct attribute *hsw_events_attrs[] = {
	EVENT_PTR(tx_start),
	EVENT_PTR(tx_commit),
	EVENT_PTR(tx_abort),
	EVENT_PTR(tx_capacity),
	EVENT_PTR(tx_conflict),
	EVENT_PTR(el_start),
	EVENT_PTR(el_commit),
	EVENT_PTR(el_abort),
	EVENT_PTR(el_capacity),
	EVENT_PTR(el_conflict),
	EVENT_PTR(cycles_t),
	EVENT_PTR(cycles_ct),
	EVENT_PTR(mem_ld_hsw),
	EVENT_PTR(mem_st_hsw),
	NULL,
};
__init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	struct event_constraint *c;
	unsigned int unused;
	struct extra_reg *er;
	int version, i;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xb:
			return knc_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}
	/*
	 * Check whether the Architectural PerfMon supports
	 * the Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl		= ebx.full;
	x86_pmu.events_mask_len		= eax.split.mask_length;

	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
	if (boot_cpu_has(X86_FEATURE_PDCM)) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}
	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65nm Core "Yonah" */
		pr_cont("Core events, ");
		break;
	case 15: /* 65nm Core2 "Merom" */
		x86_add_quirk(intel_clovertown_quirk);
	case 22: /* 65nm Core2 "Merom-L" */
	case 23: /* 45nm Core2 "Penryn" */
	case 29: /* 45nm Core2 "Dunnington" (MP) */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;
	case 30: /* 45nm Nehalem */
	case 26: /* 45nm Nehalem-EP */
	case 46: /* 45nm Nehalem-EX */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		x86_add_quirk(intel_nehalem_quirk);

		pr_cont("Nehalem events, ");
		break;
	case 28: /* 45nm Atom "Pineview" */
	case 38: /* 45nm Atom "Lincroft" */
	case 39: /* 32nm Atom "Penwell" */
	case 53: /* 32nm Atom "Cloverview" */
	case 54: /* 32nm Atom "Cedarview" */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;
	case 55: /* 22nm Atom "Silvermont" */
	case 76: /* 14nm Atom "Airmont" */
	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_slm_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		pr_cont("Silvermont events, ");
		break;
	case 37: /* 32nm Westmere */
	case 44: /* 32nm Westmere-EP */
	case 47: /* 32nm Westmere-EX */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		pr_cont("Westmere events, ");
		break;
	case 42: /* 32nm SandyBridge */
	case 45: /* 32nm SandyBridge-E/EN/EP */
		x86_add_quirk(intel_sandybridge_quirk);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 45)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("SandyBridge events, ");
		break;
	case 58: /* 22nm IvyBridge */
	case 62: /* 22nm IvyBridge-EP/EX */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		/* dTLB-load-misses on IVB differs from SNB */
		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 62)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("IvyBridge events, ");
		break;
	case 60: /* 22nm Haswell Core */
	case 63: /* 22nm Haswell Server */
	case 69: /* 22nm Haswell ULT */
	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_hsw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.cpu_events = hsw_events_attrs;
		x86_pmu.lbr_double_abort = true;
		pr_cont("Haswell events, ");
		break;
	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			break;
		}
	}
	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
	if (x86_pmu.event_constraints) {
		/*
		 * The event on fixed counter 2 (REF_CYCLES) only works on this
		 * counter, so do not extend the mask to generic counters:
		 */
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != FIXED_EVENT_FLAGS
			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
				continue;
			}

			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			c->weight += x86_pmu.num_counters;
		}
	}
	/*
	 * Accessing LBR MSRs may cause a #GP under certain circumstances,
	 * e.g. KVM doesn't support the LBR MSRs. Check all LBR MSRs here
	 * and disable LBR access if any of them cannot be accessed.
	 */
	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}
	/*
	 * Accessing extra registers may cause a #GP under certain
	 * circumstances, e.g. KVM doesn't support offcore events.
	 * Check all extra_regs here.
	 */
	if (x86_pmu.extra_regs) {
		for (er = x86_pmu.extra_regs; er->msr; er++) {
			er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
			/* Disable LBR select mapping */
			if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
				x86_pmu.lbr_sel_map = NULL;
		}
	}
	/* Support full-width counters using the alternative MSR range */
	if (x86_pmu.intel_cap.full_width_write) {
		x86_pmu.max_period = x86_pmu.cntval_mask;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");