/*
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/watchdog.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>

#include "perf_event.h"
/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */

	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */

	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
struct event_constraint intel_skl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_snb,	"event=0xcd,umask=0x2");
struct attribute *nhm_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),

struct attribute *snb_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),

	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
struct event_constraint intel_bdw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
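/*
 * Worked example (illustration only, not in the original source): generic
 * hardware events resolve to raw architectural encodings through the table
 * above, e.g.
 *
 *	u64 config = intel_pmu_event_map(PERF_COUNT_HW_INSTRUCTIONS);
 *	// config == 0x00c0: event select 0xc0 (INST_RETIRED.ANY), umask 0x00
 */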
/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts.
 * - icache miss does not include decoded icache
 */
#define SKL_DEMAND_DATA_RD		BIT_ULL(0)
#define SKL_DEMAND_RFO			BIT_ULL(1)
#define SKL_ANY_RESPONSE		BIT_ULL(16)
#define SKL_SUPPLIER_NONE		BIT_ULL(17)
#define SKL_L3_MISS_LOCAL_DRAM		BIT_ULL(26)
#define SKL_L3_MISS_REMOTE_HOP0_DRAM	BIT_ULL(27)
#define SKL_L3_MISS_REMOTE_HOP1_DRAM	BIT_ULL(28)
#define SKL_L3_MISS_REMOTE_HOP2P_DRAM	BIT_ULL(29)
#define SKL_L3_MISS			(SKL_L3_MISS_LOCAL_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
#define SKL_SPL_HIT			BIT_ULL(30)
#define SKL_SNOOP_NONE			BIT_ULL(31)
#define SKL_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define SKL_SNOOP_MISS			BIT_ULL(33)
#define SKL_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define SKL_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define SKL_SNOOP_HITM			BIT_ULL(36)
#define SKL_SNOOP_NON_DRAM		BIT_ULL(37)
#define SKL_ANY_SNOOP			(SKL_SPL_HIT|SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
#define SKL_DEMAND_READ			SKL_DEMAND_DATA_RD
#define SKL_SNOOP_DRAM			(SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SPL_HIT)
#define SKL_DEMAND_WRITE		SKL_DEMAND_RFO
#define SKL_LLC_ACCESS			SKL_ANY_RESPONSE
#define SKL_L3_MISS_REMOTE		(SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
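/*
 * Worked example (illustration only): the LL read-miss entry in the Skylake
 * extra-regs table below is composed from these bits roughly as
 *
 *	SKL_DEMAND_READ | SKL_L3_MISS | SKL_ANY_SNOOP
 *
 * i.e. "demand data read that missed L3, with any snoop response"; this is
 * the value that ends up in MSR_OFFCORE_RSP_x for that cache event.
 */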
static __initconst const u64 skl_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
	[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */

	[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
	[ C(RESULT_MISS)   ] = 0x0,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,

	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x283,	/* ICACHE_64B.MISS */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,

	[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
	[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */

	[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
	[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,

	[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
	[ C(RESULT_MISS)   ] = 0x608,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */

	[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
	[ C(RESULT_MISS)   ] = 0x649,	/* DTLB_STORE_MISSES.WALK_COMPLETED */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,

	[ C(RESULT_ACCESS) ] = 0x2085,	/* ITLB_MISSES.STLB_HIT */
	[ C(RESULT_MISS)   ] = 0xe85,	/* ITLB_MISSES.WALK_COMPLETED */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
	[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
	[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */

	[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
	[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,
static __initconst const u64 skl_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
			       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
	[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
			       SKL_L3_MISS|SKL_ANY_SNOOP|

	[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
			       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
	[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
			       SKL_L3_MISS|SKL_ANY_SNOOP|

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,

	[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
			       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
	[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
			       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,

	[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
			       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
	[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
			       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
	[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,

	[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
	[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
	[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,

	[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
	[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,

	[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
	[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
	[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(RESULT_ACCESS) ] = 0xf1d0,	/* MEM_UOP_RETIRED.LOADS */
	[ C(RESULT_MISS)   ] = 0x0151,	/* L1D.REPLACEMENT */

	[ C(RESULT_ACCESS) ] = 0xf2d0,	/* MEM_UOP_RETIRED.STORES */
	[ C(RESULT_MISS)   ] = 0x0851,	/* L1D.ALL_M_REPLACEMENT */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x024e,	/* HW_PRE_REQ.DL1_MISS */

	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0280,	/* ICACHE.MISSES */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,

	/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
	[ C(RESULT_ACCESS) ] = 0x01b7,
	/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
	[ C(RESULT_MISS)   ] = 0x01b7,

	/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
	[ C(RESULT_ACCESS) ] = 0x01b7,
	/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
	[ C(RESULT_MISS)   ] = 0x01b7,

	[ C(OP_PREFETCH) ] = {
	/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
	[ C(RESULT_ACCESS) ] = 0x01b7,
	/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
	[ C(RESULT_MISS)   ] = 0x01b7,

	[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOP_RETIRED.ALL_LOADS */
	[ C(RESULT_MISS)   ] = 0x0108,	/* DTLB_LOAD_MISSES.CAUSES_A_WALK */

	[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOP_RETIRED.ALL_STORES */
	[ C(RESULT_MISS)   ] = 0x0149,	/* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,

	[ C(RESULT_ACCESS) ] = 0x1085,	/* ITLB_MISSES.STLB_HIT */
	[ C(RESULT_MISS)   ] = 0x0185,	/* ITLB_MISSES.CAUSES_A_WALK */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
	[ C(RESULT_MISS)   ] = 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(RESULT_ACCESS) ] = 0x01b7,
	[ C(RESULT_MISS)   ] = 0x01b7,

	[ C(RESULT_ACCESS) ] = 0x01b7,
	[ C(RESULT_MISS)   ] = 0x01b7,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x01b7,
	[ C(RESULT_MISS)   ] = 0x01b7,
/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliably counted.
 */

#define HSW_DEMAND_DATA_RD		BIT_ULL(0)
#define HSW_DEMAND_RFO			BIT_ULL(1)
#define HSW_ANY_RESPONSE		BIT_ULL(16)
#define HSW_SUPPLIER_NONE		BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM		BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0		BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1		BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P	BIT_ULL(29)
#define HSW_L3_MISS			(HSW_L3_MISS_LOCAL_DRAM| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE			BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define HSW_SNOOP_MISS			BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define HSW_SNOOP_HITM			BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM		BIT_ULL(37)
#define HSW_ANY_SNOOP			(HSW_SNOOP_NONE| \
					 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
					 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
					 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM			(HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ			HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE		HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE		(HSW_L3_MISS_REMOTE_HOP0|\
					 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS			HSW_ANY_RESPONSE

#define BDW_L3_MISS_LOCAL		BIT(26)
#define BDW_L3_MISS			(BDW_L3_MISS_LOCAL| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
static __initconst const u64 hsw_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
	[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */

	[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
	[ C(RESULT_MISS)   ] = 0x0,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,

	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x280,	/* ICACHE.MISSES */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,

	[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
	[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */

	[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
	[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,

	[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
	[ C(RESULT_MISS)   ] = 0x108,	/* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */

	[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
	[ C(RESULT_MISS)   ] = 0x149,	/* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,

	[ C(RESULT_ACCESS) ] = 0x6085,	/* ITLB_MISSES.STLB_HIT */
	[ C(RESULT_MISS)   ] = 0x185,	/* ITLB_MISSES.MISS_CAUSES_A_WALK */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
	[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
	[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */

	[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
	[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,
static __initconst const u64 hsw_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|

	[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
			       HSW_L3_MISS|HSW_ANY_SNOOP,

	[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|

	[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
			       HSW_L3_MISS|HSW_ANY_SNOOP,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,

	[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
			       HSW_L3_MISS_LOCAL_DRAM|

	[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|

	[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
			       HSW_L3_MISS_LOCAL_DRAM|

	[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,
static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(RESULT_ACCESS) ] = 0x010b,	/* MEM_INST_RETIRED.LOADS */
	[ C(RESULT_MISS)   ] = 0x0151,	/* L1D.REPL */

	[ C(RESULT_ACCESS) ] = 0x020b,	/* MEM_INST_RETIRED.STORES */
	[ C(RESULT_MISS)   ] = 0x0251,	/* L1D.M_REPL */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x014e,	/* L1D_PREFETCH.REQUESTS */
	[ C(RESULT_MISS)   ] = 0x024e,	/* L1D_PREFETCH.MISS */

	[ C(RESULT_ACCESS) ] = 0x0380,	/* L1I.READS */
	[ C(RESULT_MISS)   ] = 0x0280,	/* L1I.MISSES */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,

	/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
	[ C(RESULT_ACCESS) ] = 0x01b7,
	/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
	[ C(RESULT_MISS)   ] = 0x01b7,

	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
	[ C(RESULT_ACCESS) ] = 0x01b7,
	/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
	[ C(RESULT_MISS)   ] = 0x01b7,

	[ C(OP_PREFETCH) ] = {
	/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
	[ C(RESULT_ACCESS) ] = 0x01b7,
	/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
	[ C(RESULT_MISS)   ] = 0x01b7,

	[ C(RESULT_ACCESS) ] = 0x010b,	/* MEM_INST_RETIRED.LOADS */
	[ C(RESULT_MISS)   ] = 0x0108,	/* DTLB_LOAD_MISSES.ANY */

	[ C(RESULT_ACCESS) ] = 0x020b,	/* MEM_INST_RETIRED.STORES */
	[ C(RESULT_MISS)   ] = 0x010c,	/* MEM_STORE_RETIRED.DTLB_MISS */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,

	[ C(RESULT_ACCESS) ] = 0x01c0,	/* INST_RETIRED.ANY_P */
	[ C(RESULT_MISS)   ] = 0x0185,	/* ITLB_MISSES.ANY */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
	[ C(RESULT_MISS)   ] = 0x03e8,	/* BPU_CLEARS.ANY */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(RESULT_ACCESS) ] = 0x01b7,
	[ C(RESULT_MISS)   ] = 0x01b7,

	[ C(RESULT_ACCESS) ] = 0x01b7,
	[ C(RESULT_MISS)   ] = 0x01b7,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x01b7,
	[ C(RESULT_MISS)   ] = 0x01b7,
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)

#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
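/*
 * Worked example (illustration only): NHM_DMND_READ|NHM_L3_ACCESS, the LL
 * read-access mask used in the table below, evaluates to 0xf701 (bit 0,
 * bits 8-10 and bits 12-15). That value is programmed into MSR_OFFCORE_RSP_x
 * through the OFFCORE_RESPONSE extra register. From userspace the same mask
 * can be supplied via the PMU's offcore_rsp format attribute; assumed
 * perf-tool syntax, shown only as an example:
 *
 *	perf stat -e cpu/event=0xb7,umask=0x01,offcore_rsp=0xf701/ sleep 1
 */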
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
	[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,

	[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
	[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
	[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,

	[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
	[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,

	[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
	[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
	[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(RESULT_ACCESS) ] = 0x010b,	/* MEM_INST_RETIRED.LOADS */
	[ C(RESULT_MISS)   ] = 0x0151,	/* L1D.REPL */

	[ C(RESULT_ACCESS) ] = 0x020b,	/* MEM_INST_RETIRED.STORES */
	[ C(RESULT_MISS)   ] = 0x0251,	/* L1D.M_REPL */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x014e,	/* L1D_PREFETCH.REQUESTS */
	[ C(RESULT_MISS)   ] = 0x024e,	/* L1D_PREFETCH.MISS */

	[ C(RESULT_ACCESS) ] = 0x0380,	/* L1I.READS */
	[ C(RESULT_MISS)   ] = 0x0280,	/* L1I.MISSES */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,

	/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
	[ C(RESULT_ACCESS) ] = 0x01b7,
	/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
	[ C(RESULT_MISS)   ] = 0x01b7,

	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
	[ C(RESULT_ACCESS) ] = 0x01b7,
	/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
	[ C(RESULT_MISS)   ] = 0x01b7,

	[ C(OP_PREFETCH) ] = {
	/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
	[ C(RESULT_ACCESS) ] = 0x01b7,
	/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
	[ C(RESULT_MISS)   ] = 0x01b7,

	[ C(RESULT_ACCESS) ] = 0x0f40,	/* L1D_CACHE_LD.MESI (alias) */
	[ C(RESULT_MISS)   ] = 0x0108,	/* DTLB_LOAD_MISSES.ANY */

	[ C(RESULT_ACCESS) ] = 0x0f41,	/* L1D_CACHE_ST.MESI (alias) */
	[ C(RESULT_MISS)   ] = 0x010c,	/* MEM_STORE_RETIRED.DTLB_MISS */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0x0,

	[ C(RESULT_ACCESS) ] = 0x01c0,	/* INST_RETIRED.ANY_P */
	[ C(RESULT_MISS)   ] = 0x20c8,	/* ITLB_MISS_RETIRED */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
	[ C(RESULT_MISS)   ] = 0x03e8,	/* BPU_CLEARS.ANY */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(RESULT_ACCESS) ] = 0x01b7,
	[ C(RESULT_MISS)   ] = 0x01b7,

	[ C(RESULT_ACCESS) ] = 0x01b7,
	[ C(RESULT_MISS)   ] = 0x01b7,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x01b7,
	[ C(RESULT_MISS)   ] = 0x01b7,
static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(RESULT_ACCESS) ] = 0x0f40,	/* L1D_CACHE_LD.MESI */
	[ C(RESULT_MISS)   ] = 0x0140,	/* L1D_CACHE_LD.I_STATE */

	[ C(RESULT_ACCESS) ] = 0x0f41,	/* L1D_CACHE_ST.MESI */
	[ C(RESULT_MISS)   ] = 0x0141,	/* L1D_CACHE_ST.I_STATE */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x104e,	/* L1D_PREFETCH.REQUESTS */
	[ C(RESULT_MISS)   ] = 0,

	[ C(RESULT_ACCESS) ] = 0x0080,	/* L1I.READS */
	[ C(RESULT_MISS)   ] = 0x0081,	/* L1I.MISSES */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0,
	[ C(RESULT_MISS)   ] = 0,

	[ C(RESULT_ACCESS) ] = 0x4f29,	/* L2_LD.MESI */
	[ C(RESULT_MISS)   ] = 0x4129,	/* L2_LD.ISTATE */

	[ C(RESULT_ACCESS) ] = 0x4f2A,	/* L2_ST.MESI */
	[ C(RESULT_MISS)   ] = 0x412A,	/* L2_ST.ISTATE */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0,
	[ C(RESULT_MISS)   ] = 0,

	[ C(RESULT_ACCESS) ] = 0x0f40,	/* L1D_CACHE_LD.MESI (alias) */
	[ C(RESULT_MISS)   ] = 0x0208,	/* DTLB_MISSES.MISS_LD */

	[ C(RESULT_ACCESS) ] = 0x0f41,	/* L1D_CACHE_ST.MESI (alias) */
	[ C(RESULT_MISS)   ] = 0x0808,	/* DTLB_MISSES.MISS_ST */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0,
	[ C(RESULT_MISS)   ] = 0,

	[ C(RESULT_ACCESS) ] = 0x00c0,	/* INST_RETIRED.ANY_P */
	[ C(RESULT_MISS)   ] = 0x1282,	/* ITLBMISSES */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ANY */
	[ C(RESULT_MISS)   ] = 0x00c5,	/* BP_INST_RETIRED.MISPRED */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,
static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(RESULT_ACCESS) ] = 0x2140,	/* L1D_CACHE.LD */
	[ C(RESULT_MISS)   ] = 0,

	[ C(RESULT_ACCESS) ] = 0x2240,	/* L1D_CACHE.ST */
	[ C(RESULT_MISS)   ] = 0,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0x0,
	[ C(RESULT_MISS)   ] = 0,

	[ C(RESULT_ACCESS) ] = 0x0380,	/* L1I.READS */
	[ C(RESULT_MISS)   ] = 0x0280,	/* L1I.MISSES */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0,
	[ C(RESULT_MISS)   ] = 0,

	[ C(RESULT_ACCESS) ] = 0x4f29,	/* L2_LD.MESI */
	[ C(RESULT_MISS)   ] = 0x4129,	/* L2_LD.ISTATE */

	[ C(RESULT_ACCESS) ] = 0x4f2A,	/* L2_ST.MESI */
	[ C(RESULT_MISS)   ] = 0x412A,	/* L2_ST.ISTATE */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0,
	[ C(RESULT_MISS)   ] = 0,

	[ C(RESULT_ACCESS) ] = 0x2140,	/* L1D_CACHE_LD.MESI (alias) */
	[ C(RESULT_MISS)   ] = 0x0508,	/* DTLB_MISSES.MISS_LD */

	[ C(RESULT_ACCESS) ] = 0x2240,	/* L1D_CACHE_ST.MESI (alias) */
	[ C(RESULT_MISS)   ] = 0x0608,	/* DTLB_MISSES.MISS_ST */

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0,
	[ C(RESULT_MISS)   ] = 0,

	[ C(RESULT_ACCESS) ] = 0x00c0,	/* INST_RETIRED.ANY_P */
	[ C(RESULT_MISS)   ] = 0x0282,	/* ITLB.MISSES */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ANY */
	[ C(RESULT_MISS)   ] = 0x00c5,	/* BP_INST_RETIRED.MISPRED */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,
static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
#define SLM_DMND_READ		SNB_DMND_DATA_RD
#define SLM_DMND_WRITE		SNB_DMND_RFO
#define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS		SNB_RESP_ANY
#define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)
static __initconst const u64 slm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
	[ C(RESULT_MISS)   ] = 0,

	[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
	[ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
	[ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
static __initconst const u64 slm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(RESULT_ACCESS) ] = 0,
	[ C(RESULT_MISS)   ] = 0x0104,	/* LD_DCU_MISS */

	[ C(RESULT_ACCESS) ] = 0,
	[ C(RESULT_MISS)   ] = 0,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0,
	[ C(RESULT_MISS)   ] = 0,

	[ C(RESULT_ACCESS) ] = 0x0380,	/* ICACHE.ACCESSES */
	[ C(RESULT_MISS)   ] = 0x0280,	/* ICACHE.MISSES */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0,
	[ C(RESULT_MISS)   ] = 0,

	/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
	[ C(RESULT_ACCESS) ] = 0x01b7,
	[ C(RESULT_MISS)   ] = 0,

	/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
	[ C(RESULT_ACCESS) ] = 0x01b7,
	/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
	[ C(RESULT_MISS)   ] = 0x01b7,

	[ C(OP_PREFETCH) ] = {
	/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
	[ C(RESULT_ACCESS) ] = 0x01b7,
	/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
	[ C(RESULT_MISS)   ] = 0x01b7,

	[ C(RESULT_ACCESS) ] = 0,
	[ C(RESULT_MISS)   ] = 0x0804,	/* LD_DTLB_MISS */

	[ C(RESULT_ACCESS) ] = 0,
	[ C(RESULT_MISS)   ] = 0,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = 0,
	[ C(RESULT_MISS)   ] = 0,

	[ C(RESULT_ACCESS) ] = 0x00c0,	/* INST_RETIRED.ANY_P */
	[ C(RESULT_MISS)   ] = 0x40205,	/* PAGE_WALKS.I_SIDE_WALKS */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ANY */
	[ C(RESULT_MISS)   ] = 0x00c5,	/* BP_INST_RETIRED.MISPRED */

	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
	[ C(RESULT_ACCESS) ] = -1,
	[ C(RESULT_MISS)   ] = -1,
/*
 * Use from PMIs where the LBRs are already disabled.
 */
static void __intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();
	else
		intel_bts_disable_local();

	intel_pmu_pebs_disable_all();
}

static void intel_pmu_disable_all(void)
{
	__intel_pmu_disable_all();
	intel_pmu_lbr_disable_all();
}
static void __intel_pmu_enable_all(int added, bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all(pmi);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	} else
		intel_bts_enable_local();
}

static void intel_pmu_enable_all(int added)
{
	__intel_pmu_enable_all(added, false);
}
/*
 * Intel Errata AAK100 (model 26)
 * Intel Errata AAP53  (model 30)
 * Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The Errata requires below steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}
static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}
static inline bool event_is_checkpointed(struct perf_event *event)
{
	return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
}
static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
	cpuc->intel_cp_status &= ~(1ull << hwc->idx);

	/*
	 * must disable before any actual event
	 * because any event may be combined with LBR
	 */
	if (needs_branch_stack(event))
		intel_pmu_lbr_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
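/*
 * Worked example (illustration only): for fixed counter 1
 * (CPU_CLK_UNHALTED.CORE) counting both user and kernel with PMI enabled,
 * bits = 0x8 | 0x2 | 0x1 = 0xb, and the nibble is shifted into place as
 * 0xb << (1 * 4), i.e. 0xb0 within MSR_ARCH_PERFMON_FIXED_CTR_CTRL.
 */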
static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	/*
	 * must be enabled before any actual event
	 * because any event may be combined with LBR
	 */
	if (needs_branch_stack(event))
		intel_pmu_lbr_enable(event);

	if (event->attr.exclude_host)
		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
	if (event->attr.exclude_guest)
		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

	if (unlikely(event_is_checkpointed(event)))
		cpuc->intel_cp_status |= (1ull << hwc->idx);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	/*
	 * For a checkpointed counter always reset back to 0. This
	 * avoids a situation where the counter overflows, aborts the
	 * transaction and is then set back to shortly before the
	 * overflow, and overflows and aborts again.
	 */
	if (unlikely(event_is_checkpointed(event))) {
		/* No race with NMIs because the counter should not be armed */
		wrmsrl(event->hw.event_base, 0);
		local64_set(&event->hw.prev_count, 0);
	}
	return x86_perf_event_set_period(event);
}
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	/* Ack all overflows and disable fixed counters */
	if (x86_pmu.version >= 2) {
		intel_pmu_ack_status(intel_pmu_get_status());
		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
	}

	/* Reset LBRs and LBR freezing */
	if (x86_pmu.lbr_nr) {
		update_debugctlmsr(get_debugctlmsr() &
			~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
	}

	local_irq_restore(flags);
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * No known reason to not always do late ACK,
	 * but just in case do it opt-in.
	 */
	if (!x86_pmu.late_ack)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	__intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	handled += intel_bts_interrupt();
	status = intel_pmu_get_status();
	if (!status)
		goto done;

	loops = 0;
again:
	intel_pmu_lbr_read();
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		static bool warned = false;

		if (!warned) {
			WARN(1, "perfevents: irq loop stuck!\n");
			perf_event_print_debug();
			warned = true;
		}
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	/*
	 * Ignore a range of extra bits in status that do not indicate
	 * overflow by themselves.
	 */
	status &= ~(GLOBAL_STATUS_COND_CHG |
		    GLOBAL_STATUS_ASIF |
		    GLOBAL_STATUS_LBRS_FROZEN);
	if (!status)
		goto done;

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	/*
	 * Intel PT
	 */
	if (__test_and_clear_bit(55, (unsigned long *)&status)) {
		handled++;
		intel_pt_interrupt();
	}

	/*
	 * Checkpointed counters can lead to 'spurious' PMIs because the
	 * rollback caused by the PMI will have cleared the overflow status
	 * bit. Therefore always force probe these counters.
	 */
	status |= cpuc->intel_cp_status;

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	__intel_pmu_enable_all(0, true);
	/*
	 * Only unmask the NMI after the overflow counters
	 * have been reset. This avoids spurious NMIs on
	 * Haswell CPUs.
	 */
	if (x86_pmu.late_ack)
		apic_write(APIC_LVTPC, APIC_DM_NMI);

	return handled;
}
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}
static int intel_alt_er(int idx, u64 config)
{
	int alt_idx = idx;

	if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
		return idx;

	if (idx == EXTRA_REG_RSP_0)
		alt_idx = EXTRA_REG_RSP_1;

	if (idx == EXTRA_REG_RSP_1)
		alt_idx = EXTRA_REG_RSP_0;

	if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
		return idx;

	return alt_idx;
}
static void intel_fixup_er(struct perf_event *event, int idx)
{
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}
/*
 * manage allocation of shared extra msr for certain events
 *
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
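/*
 * Illustrative scenario (not taken from the original source): if two events
 * on the same core request OFFCORE_RESPONSE with *different* masks, only one
 * can own MSR_OFFCORE_RSP_0; the other is either steered to the alternate
 * register via intel_alt_er()/intel_fixup_er() (when RSP_1 exists and the
 * mask is valid there) or receives the empty constraint and cannot be
 * scheduled at the same time.
 */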
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct perf_event *event,
				   struct hw_perf_event_extra *reg)
{
	struct event_constraint *c = &emptyconstraint;
	struct er_account *era;
	unsigned long flags;
	int idx = reg->idx;

	/*
	 * reg->alloc can be set due to existing state, so for fake cpuc we
	 * need to ignore this, otherwise we might fail to allocate proper fake
	 * state for this extra reg constraint. Also see the comment below.
	 */
	if (reg->alloc && !cpuc->is_fake)
		return NULL; /* call x86_get_event_constraint() */

again:
	era = &cpuc->shared_regs->regs[idx];
	/*
	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	 */
	raw_spin_lock_irqsave(&era->lock, flags);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		/*
		 * If its a fake cpuc -- as per validate_{group,event}() we
		 * shouldn't touch event state and we can avoid doing so
		 * since both will only call get_event_constraints() once
		 * on each event, this avoids the need for reg->alloc.
		 *
		 * Not doing the ER fixup will only result in era->reg being
		 * wrong, but since we won't actually try and program hardware
		 * this isn't a problem either.
		 */
		if (!cpuc->is_fake) {
			if (idx != reg->idx)
				intel_fixup_er(event, idx);

			/*
			 * x86_schedule_events() can call get_event_constraints()
			 * multiple times on events in the case of incremental
			 * scheduling(). reg->alloc ensures we only do the ER
			 * allocation once.
			 */
			reg->alloc = 1;
		}

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		atomic_inc(&era->ref);

		/*
		 * need to call x86_get_event_constraint()
		 * to check if associated event has constraints
		 */
		c = NULL;
	} else {
		idx = intel_alt_er(idx, reg->config);
		if (idx != reg->idx) {
			raw_spin_unlock_irqrestore(&era->lock, flags);
			goto again;
		}
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return c;
}
static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
				   struct hw_perf_event_extra *reg)
{
	struct er_account *era;

	/*
	 * Only put constraint if extra reg was actually allocated. Also takes
	 * care of event which do not use an extra shared reg.
	 *
	 * Also, if this is a fake cpuc we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
	 * either since it'll be thrown out.
	 */
	if (!reg->alloc || cpuc->is_fake)
		return;

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
	reg->alloc = 0;
}
static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	struct event_constraint *c = NULL, *d;
	struct hw_perf_event_extra *xreg, *breg;

	xreg = &event->hw.extra_reg;
	if (xreg->idx != EXTRA_REG_NONE) {
		c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
		if (c == &emptyconstraint)
			return c;
	}
	breg = &event->hw.branch_reg;
	if (breg->idx != EXTRA_REG_NONE) {
		d = __intel_shared_reg_get_constraints(cpuc, event, breg);
		if (d == &emptyconstraint) {
			__intel_shared_reg_put_constraints(cpuc, xreg);
			c = d;
		}
	}
	return c;
}
struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	return &unconstrained;
}
static struct event_constraint *
__intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			      struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_shared_regs_constraints(cpuc, event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, idx, event);
}
static void
intel_start_scheduling(struct cpu_hw_events *cpuc)
{
	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
	struct intel_excl_states *xl;
	int tid = cpuc->excl_thread_id;

	/*
	 * nothing needed if in group validation mode
	 */
	if (cpuc->is_fake || !is_ht_workaround_enabled())
		return;

	/*
	 * no exclusion needed
	 */
	if (WARN_ON_ONCE(!excl_cntrs))
		return;

	xl = &excl_cntrs->states[tid];

	xl->sched_started = true;
	/*
	 * lock shared state until we are done scheduling
	 * in stop_event_scheduling()
	 * makes scheduling appear as a transaction
	 */
	raw_spin_lock(&excl_cntrs->lock);
}
static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
{
	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
	struct event_constraint *c = cpuc->event_constraint[idx];
	struct intel_excl_states *xl;
	int tid = cpuc->excl_thread_id;

	if (cpuc->is_fake || !is_ht_workaround_enabled())
		return;

	if (WARN_ON_ONCE(!excl_cntrs))
		return;

	if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
		return;

	xl = &excl_cntrs->states[tid];

	lockdep_assert_held(&excl_cntrs->lock);

	if (c->flags & PERF_X86_EVENT_EXCL)
		xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
	else
		xl->state[cntr] = INTEL_EXCL_SHARED;
}
static void
intel_stop_scheduling(struct cpu_hw_events *cpuc)
{
	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
	struct intel_excl_states *xl;
	int tid = cpuc->excl_thread_id;

	/*
	 * nothing needed if in group validation mode
	 */
	if (cpuc->is_fake || !is_ht_workaround_enabled())
		return;
	/*
	 * no exclusion needed
	 */
	if (WARN_ON_ONCE(!excl_cntrs))
		return;

	xl = &excl_cntrs->states[tid];

	xl->sched_started = false;
	/*
	 * release shared state lock (acquired in intel_start_scheduling())
	 */
	raw_spin_unlock(&excl_cntrs->lock);
}
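/*
 * intel_start_scheduling(), intel_commit_scheduling() and
 * intel_stop_scheduling() bracket one counter-scheduling pass: start takes
 * excl_cntrs->lock, commit records each assigned counter as EXCLUSIVE or
 * SHARED for the sibling thread to observe, and stop releases the lock, so
 * the whole pass appears as a single transaction to the sibling.
 */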
static struct event_constraint *
intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			   int idx, struct event_constraint *c)
{
	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
	struct intel_excl_states *xlo;
	int tid = cpuc->excl_thread_id;
	int is_excl, i;

	/*
	 * validating a group does not require
	 * enforcing cross-thread exclusion
	 */
	if (cpuc->is_fake || !is_ht_workaround_enabled())
		return c;

	/*
	 * no exclusion needed
	 */
	if (WARN_ON_ONCE(!excl_cntrs))
		return c;

	/*
	 * because we modify the constraint, we need
	 * to make a copy. Static constraints come
	 * from static const tables.
	 *
	 * only needed when constraint has not yet
	 * been cloned (marked dynamic)
	 */
	if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
		struct event_constraint *cx;

		/*
		 * grab pre-allocated constraint entry
		 */
		cx = &cpuc->constraint_list[idx];

		/*
		 * initialize dynamic constraint
		 * with static constraint
		 */
		*cx = *c;

		/*
		 * mark constraint as dynamic, so we
		 * can free it later on
		 */
		cx->flags |= PERF_X86_EVENT_DYNAMIC;
		c = cx;
	}

	/*
	 * From here on, the constraint is dynamic.
	 * Either it was just allocated above, or it
	 * was allocated during an earlier invocation
	 * of this function.
	 */

	/*
	 * state of sibling HT
	 */
	xlo = &excl_cntrs->states[tid ^ 1];

	/*
	 * event requires exclusive counter access
	 * across HT threads
	 */
	is_excl = c->flags & PERF_X86_EVENT_EXCL;
	if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
		event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
		if (!cpuc->n_excl++)
			WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
	}

	/*
	 * Modify static constraint with current dynamic
	 * state of the sibling thread's counters:
	 *
	 * EXCLUSIVE: sibling counter measuring exclusive event
	 * SHARED   : sibling counter measuring non-exclusive event
	 * UNUSED   : sibling counter unused
	 */
	for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
		/*
		 * exclusive event in sibling counter
		 * our corresponding counter cannot be used
		 * regardless of our event
		 */
		if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE)
			__clear_bit(i, c->idxmsk);
		/*
		 * if measuring an exclusive event, sibling
		 * measuring non-exclusive, then counter cannot
		 * be used
		 */
		if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED)
			__clear_bit(i, c->idxmsk);
	}

	/*
	 * recompute actual bit weight for scheduling algorithm
	 */
	c->weight = hweight64(c->idxmsk64);

	/*
	 * if we return an empty mask, then switch
	 * back to static empty constraint to avoid
	 * the cost of freeing later on
	 */
	if (!c->weight)
		c = &emptyconstraint;

	return c;
}
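/*
 * Summary of the above: the static constraint is first cloned into
 * cpuc->constraint_list so the const tables are never written to, then any
 * counter the sibling thread holds as EXCLUSIVE (or as SHARED, when this
 * event itself is exclusive) is stripped from the mask; if nothing remains,
 * the shared emptyconstraint is returned instead of the dynamic copy.
 */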
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			    struct perf_event *event)
{
	struct event_constraint *c1 = NULL;
	struct event_constraint *c2;

	if (idx >= 0) /* fake does < 0 */
		c1 = cpuc->event_constraint[idx];

	/*
	 * first time only:
	 * - static constraint: no change across incremental scheduling calls
	 * - dynamic constraint: handled by intel_get_excl_constraints()
	 */
	c2 = __intel_get_event_constraints(cpuc, idx, event);
	if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) {
		bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
		c1->weight = c2->weight;
		c2 = c1;
	}

	if (cpuc->excl_cntrs)
		return intel_get_excl_constraints(cpuc, event, idx, c2);

	return c2;
}
static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
		struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
	int tid = cpuc->excl_thread_id;
	struct intel_excl_states *xl;

	/*
	 * nothing needed if in group validation mode
	 */
	if (cpuc->is_fake)
		return;

	if (WARN_ON_ONCE(!excl_cntrs))
		return;

	if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
		hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
		if (!--cpuc->n_excl)
			WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
	}

	/*
	 * If event was actually assigned, then mark the counter state as
	 * unused now.
	 */
	if (hwc->idx >= 0) {
		xl = &excl_cntrs->states[tid];

		/*
		 * put_constraint may be called from x86_schedule_events()
		 * which already has the lock held, so here make locking
		 * conditional.
		 */
		if (!xl->sched_started)
			raw_spin_lock(&excl_cntrs->lock);

		xl->state[hwc->idx] = INTEL_EXCL_UNUSED;

		if (!xl->sched_started)
			raw_spin_unlock(&excl_cntrs->lock);
	}
}
static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);

	reg = &event->hw.branch_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);
}
static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	intel_put_shared_regs_event_constraints(cpuc, event);

	/*
	 * if the PMU has exclusive counter restrictions, then
	 * all events are subject to them and must call the
	 * put_excl_constraints() routine
	 */
	if (cpuc->excl_cntrs)
		intel_put_excl_constraints(cpuc, event);
}
static void intel_pebs_aliases_core2(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retires
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
static void intel_pebs_aliases_snb(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use UOPS_RETIRED.ALL
		 * (0x01c2), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * UOPS_RETIRED.ALL counts the number of cycles that retires
		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
		 * larger than the maximum number of micro-ops that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less micro-ops, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
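/*
 * Example of the rewrite above: a raw 0x003c (CPU_CLK_UNHALTED.THREAD_P)
 * request becomes event=0xc2, umask=0x01, cmask=16, inv=1 on SNB parts,
 * i.e. "cycles in which 16 or fewer uops retired" -- which is every
 * cycle -- so the count is unchanged but the event is now PEBS capable.
 */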
static unsigned long intel_pmu_free_running_flags(struct perf_event *event)
{
	unsigned long flags = x86_pmu.free_running_flags;

	if (event->attr.use_clockid)
		flags &= ~PERF_SAMPLE_TIME;
	return flags;
}
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip) {
		if (!event->attr.freq) {
			event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
			if (!(event->attr.sample_type &
			      ~intel_pmu_free_running_flags(event)))
				event->hw.flags |= PERF_X86_EVENT_FREERUNNING;
		}
		if (x86_pmu.pebs_aliases)
			x86_pmu.pebs_aliases(event);
	}

	if (needs_branch_stack(event)) {
		ret = intel_pmu_setup_lbr_filter(event);
		if (ret)
			return ret;

		/*
		 * BTS is set up earlier in this path, so don't account twice
		 */
		if (!intel_pmu_has_bts(event)) {
			/* disallow lbr if conflicting events are present */
			if (x86_add_exclusive(x86_lbr_exclusive_lbr))
				return -EBUSY;

			event->destroy = hw_perf_lbr_event_destroy;
		}
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	if (x86_pmu.guest_get_msrs)
		return x86_pmu.guest_get_msrs(nr);
	*nr = 0;
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
	/*
	 * If PMU counter has PEBS enabled it is not enough to disable counter
	 * on a guest entry since PEBS memory write can overshoot guest entry
	 * and corrupt guest memory. Disabling PEBS solves the problem.
	 */
	arr[1].msr = MSR_IA32_PEBS_ENABLE;
	arr[1].host = cpuc->pebs_enabled;
	arr[1].guest = 0;

	*nr = 2;
	return arr;
}
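/*
 * arr[0]/arr[1] above are the MSR pair that KVM swaps on guest entry/exit
 * via perf_guest_get_msrs(): GLOBAL_CTRL keeps host-only counters disabled
 * while the guest runs (and guest-only counters disabled in the host), and
 * PEBS_ENABLE is cleared for the guest entirely, for the reason given above.
 */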
static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++)  {
		struct perf_event *event = cpuc->events[idx];

		arr[idx].msr = x86_pmu_config_addr(idx);
		arr[idx].host = arr[idx].guest = 0;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		arr[idx].host = arr[idx].guest =
			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;

		if (event->attr.exclude_host)
			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		else if (event->attr.exclude_guest)
			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	}

	*nr = x86_pmu.num_counters;
	return arr;
}
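/*
 * Unlike intel_guest_get_msrs(), the core (perfmon v1) variant has no
 * global control MSR to mask, so each EVENTSEL is switched individually by
 * toggling its ENABLE bit according to exclude_host/exclude_guest.
 */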
static void core_pmu_enable_event(struct perf_event *event)
{
	if (!event->attr.exclude_host)
		x86_pmu_enable_event(event);
}
static void core_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask) ||
				cpuc->events[idx]->attr.exclude_host)
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}
static int hsw_hw_config(struct perf_event *event)
{
	int ret = intel_pmu_hw_config(event);

	if (ret)
		return ret;
	if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
		return 0;
	event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);

	/*
	 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
	 * PEBS or in ANY thread mode. Since the results are non-sensical forbid
	 * this combination.
	 */
	if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
	     ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
	      event->attr.precise_ip > 0))
		return -EOPNOTSUPP;

	if (event_is_checkpointed(event)) {
		/*
		 * Sampling of checkpointed events can cause situations where
		 * the CPU constantly aborts because of an overflow, which is
		 * then checkpointed back and ignored. Forbid checkpointing
		 * for sampling.
		 *
		 * But still allow a long sampling period, so that perf stat
		 * from KVM works.
		 */
		if (event->attr.sample_period > 0 &&
		    event->attr.sample_period < 0x7fffffff)
			return -EOPNOTSUPP;
	}
	return 0;
}
static struct event_constraint counter2_constraint =
			EVENT_CONSTRAINT(0, 0x4, 0);

static struct event_constraint *
hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_get_event_constraints(cpuc, idx, event);

	/* Handle special quirk on in_tx_checkpointed only in counter 2 */
	if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
		if (c->idxmsk64 & (1U << 2))
			return &counter2_constraint;
		return &emptyconstraint;
	}

	return c;
}
/*
 * Broadwell:
 *
 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
 * the two to enforce a minimum period of 128 (the smallest value that has bits
 * 0-5 cleared and >= 100).
 *
 * Because of how the code in x86_perf_event_set_period() works, the truncation
 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
 * to make up for the 'lost' events due to carrying the 'error' in period_left.
 *
 * Therefore the effective (average) period matches the requested period,
 * despite coarser hardware granularity.
 */
static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
{
	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
			X86_CONFIG(.event=0xc0, .umask=0x01)) {
		if (left < 128)
			left = 128;
		left &= ~0x3fu;
	}
	return left;
}
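/*
 * Per the BDM11/BDM55 rule described above: a requested INST_RETIRED.ALL
 * period of 100 is raised to 128, while 200 is truncated to 192 (low six
 * bits cleared); periods for other events pass through unchanged.
 */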
PMU_FORMAT_ATTR(event,	"config:0-7"	);
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(pc,	"config:19"	);
PMU_FORMAT_ATTR(any,	"config:21"	); /* v3 + */
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);
PMU_FORMAT_ATTR(in_tx,  "config:32");
PMU_FORMAT_ATTR(in_tx_cp, "config:33");

static struct attribute *intel_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};
ssize_t intel_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);

	return x86_event_sysfs_show(page, config, event);
}
struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	struct intel_shared_regs *regs;
	int i;

	regs = kzalloc_node(sizeof(struct intel_shared_regs),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (regs) {
		/*
		 * initialize the locks to keep lockdep happy
		 */
		for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);

		regs->core_id = -1;
	}
	return regs;
}
static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
{
	struct intel_excl_cntrs *c;

	c = kzalloc_node(sizeof(struct intel_excl_cntrs),
			 GFP_KERNEL, cpu_to_node(cpu));
	if (c) {
		raw_spin_lock_init(&c->lock);
		c->core_id = -1;
	}
	return c;
}
static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
		cpuc->shared_regs = allocate_shared_regs(cpu);
		if (!cpuc->shared_regs)
			goto err;
	}

	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
		size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);

		cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
		if (!cpuc->constraint_list)
			goto err_shared_regs;

		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
		if (!cpuc->excl_cntrs)
			goto err_constraint_list;

		cpuc->excl_thread_id = 0;
	}

	return NOTIFY_OK;

err_constraint_list:
	kfree(cpuc->constraint_list);
	cpuc->constraint_list = NULL;

err_shared_regs:
	kfree(cpuc->shared_regs);
	cpuc->shared_regs = NULL;

err:
	return NOTIFY_BAD;
}
static void intel_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;

	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();

	cpuc->lbr_sel = NULL;

	if (!cpuc->shared_regs)
		return;

	if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
		void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];

		for_each_cpu(i, topology_sibling_cpumask(cpu)) {
			struct intel_shared_regs *pc;

			pc = per_cpu(cpu_hw_events, i).shared_regs;
			if (pc && pc->core_id == core_id) {
				*onln = cpuc->shared_regs;
				cpuc->shared_regs = pc;
				break;
			}
		}
		cpuc->shared_regs->core_id = core_id;
		cpuc->shared_regs->refcnt++;
	}

	if (x86_pmu.lbr_sel_map)
		cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];

	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
		for_each_cpu(i, topology_sibling_cpumask(cpu)) {
			struct intel_excl_cntrs *c;

			c = per_cpu(cpu_hw_events, i).excl_cntrs;
			if (c && c->core_id == core_id) {
				cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
				cpuc->excl_cntrs = c;
				cpuc->excl_thread_id = 1;
				break;
			}
		}
		cpuc->excl_cntrs->core_id = core_id;
		cpuc->excl_cntrs->refcnt++;
	}
}
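/*
 * The first sibling of a core to come online keeps the shared_regs and
 * excl_cntrs it allocated in intel_pmu_cpu_prepare(). A later sibling finds
 * the existing per-core copy, adopts it, queues its own allocation on
 * kfree_on_online, and takes excl_thread_id = 1.
 */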
static void free_excl_cntrs(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_excl_cntrs *c;

	c = cpuc->excl_cntrs;
	if (c) {
		if (c->core_id == -1 || --c->refcnt == 0)
			kfree(c);
		cpuc->excl_cntrs = NULL;
		kfree(cpuc->constraint_list);
		cpuc->constraint_list = NULL;
	}
}
static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	pc = cpuc->shared_regs;
	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->shared_regs = NULL;
	}

	free_excl_cntrs(cpu);

	fini_debug_store_on_cpu(cpu);
}
static void intel_pmu_sched_task(struct perf_event_context *ctx,
				 bool sched_in)
{
	if (x86_pmu.pebs_active)
		intel_pmu_pebs_sched_task(ctx, sched_in);
	if (x86_pmu.lbr_nr)
		intel_pmu_lbr_sched_task(ctx, sched_in);
}
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");

PMU_FORMAT_ATTR(ldlat, "config1:0-15");

static struct attribute *intel_arch3_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_any.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	&format_attr_in_tx.attr,
	&format_attr_in_tx_cp.attr,

	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
	&format_attr_ldlat.attr, /* PEBS load latency */
	NULL,
};
static __initconst const struct x86_pmu core_pmu = {
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= core_pmu_enable_all,
	.enable			= core_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),

	.free_running_flags	= PEBS_FREERUNNING_FLAGS,

	/*
	 * Intel PMCs cannot be accessed sanely above 32-bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL<<31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
	.guest_get_msrs		= core_guest_get_msrs,
	.format_attrs		= intel_arch_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,

	/*
	 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
	 * together with PMU version 1 and thus be using core_pmu with
	 * shared_regs. We need following callbacks here to allocate
	 * it properly.
	 */
	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
};
static __initconst const struct x86_pmu intel_pmu = {
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),

	.free_running_flags	= PEBS_FREERUNNING_FLAGS,

	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.pebs_aliases		= intel_pebs_aliases_core2,

	.format_attrs		= intel_arch3_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
	.guest_get_msrs		= intel_guest_get_msrs,
	.sched_task		= intel_pmu_sched_task,
};
static __init void intel_clovertown_quirk(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *  AJ67  - PEBS may experience CPL leaks
	 *  AJ68  - PEBS PMI may be delayed by one event
	 *  AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *  AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67  could be worked around by restricting the OS/USR flags.
	 * AJ69  could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *	 a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}
static int intel_snb_pebs_broken(int cpu)
{
	u32 rev = UINT_MAX; /* default to broken for unknown models */

	switch (cpu_data(cpu).x86_model) {
	case 42: /* SNB */
		rev = 0x28;
		break;

	case 45: /* SNB-EP */
		switch (cpu_data(cpu).x86_mask) {
		case 6: rev = 0x618; break;
		case 7: rev = 0x70c; break;
		}
	}

	return (cpu_data(cpu).microcode < rev);
}
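/*
 * intel_snb_pebs_broken() returns true when the installed microcode is
 * older than the known-good revision for the given model/stepping; unknown
 * models default to "broken" via the UINT_MAX initializer.
 */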
static void intel_snb_check_microcode(void)
{
	int pebs_broken = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if ((pebs_broken = intel_snb_pebs_broken(cpu)))
			break;
	}
	put_online_cpus();

	if (pebs_broken == x86_pmu.pebs_broken)
		return;

	/*
	 * Serialized by the microcode lock..
	 */
	if (x86_pmu.pebs_broken) {
		pr_info("PEBS enabled due to microcode update\n");
		x86_pmu.pebs_broken = 0;
	} else {
		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
		x86_pmu.pebs_broken = 1;
	}
}
/*
 * Under certain circumstances, accessing certain MSRs may cause a #GP fault.
 * This function tests whether the input MSR can be safely accessed.
 */
static bool check_msr(unsigned long msr, u64 mask)
{
	u64 val_old, val_new, val_tmp;

	/*
	 * Read the current value, change it and read it back to see if it
	 * matches, this is needed to detect certain hardware emulators
	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
	 */
	if (rdmsrl_safe(msr, &val_old))
		return false;

	/*
	 * Only change the bits which can be updated by wrmsrl.
	 */
	val_tmp = val_old ^ mask;
	if (wrmsrl_safe(msr, val_tmp) ||
	    rdmsrl_safe(msr, &val_new))
		return false;

	if (val_new != val_tmp)
		return false;

	/*
	 * Here it's sure that the MSR can be safely accessed.
	 * Restore the old value and return.
	 */
	wrmsrl(msr, val_old);

	return true;
}
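/*
 * Usage example: the LBR probe in intel_pmu_init() calls
 * check_msr(x86_pmu.lbr_tos, 0x3UL) -- flip the low bits, write, read back.
 * An emulator that silently ignores the write (and always reads back zero)
 * fails the comparison and the corresponding feature is disabled.
 */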
static __init void intel_sandybridge_quirk(void)
{
	x86_pmu.check_microcode = intel_snb_check_microcode;
	intel_snb_check_microcode();
}
static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
};
static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events reported as not present by cpuid */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: \'%s\' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}
static __init void intel_nehalem_quirk(void)
{
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected, we work it around by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
	}
}
/*
 * enable software workaround for errata:
 * SNB: BJ122
 * IVB: BV98
 * HSW: HSD29
 *
 * Only needed when HT is enabled. However detecting
 * if HT is enabled is difficult (model specific). So instead,
 * we enable the workaround in the early boot, and verify if
 * it is needed in a later initcall phase once we have valid
 * topology information to check if HT is actually enabled
 */
static __init void intel_ht_bug(void)
{
	x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;

	x86_pmu.start_scheduling = intel_start_scheduling;
	x86_pmu.commit_scheduling = intel_commit_scheduling;
	x86_pmu.stop_scheduling = intel_stop_scheduling;
}
EVENT_ATTR_STR(mem-loads,	mem_ld_hsw,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_hsw,	"event=0xd0,umask=0x82");

/* Haswell special events */
EVENT_ATTR_STR(tx-start,	tx_start,	"event=0xc9,umask=0x1");
EVENT_ATTR_STR(tx-commit,	tx_commit,	"event=0xc9,umask=0x2");
EVENT_ATTR_STR(tx-abort,	tx_abort,	"event=0xc9,umask=0x4");
EVENT_ATTR_STR(tx-capacity,	tx_capacity,	"event=0x54,umask=0x2");
EVENT_ATTR_STR(tx-conflict,	tx_conflict,	"event=0x54,umask=0x1");
EVENT_ATTR_STR(el-start,	el_start,	"event=0xc8,umask=0x1");
EVENT_ATTR_STR(el-commit,	el_commit,	"event=0xc8,umask=0x2");
EVENT_ATTR_STR(el-abort,	el_abort,	"event=0xc8,umask=0x4");
EVENT_ATTR_STR(el-capacity,	el_capacity,	"event=0x54,umask=0x2");
EVENT_ATTR_STR(el-conflict,	el_conflict,	"event=0x54,umask=0x1");
EVENT_ATTR_STR(cycles-t,	cycles_t,	"event=0x3c,in_tx=1");
EVENT_ATTR_STR(cycles-ct,	cycles_ct,	"event=0x3c,in_tx=1,in_tx_cp=1");

static struct attribute *hsw_events_attrs[] = {
	EVENT_PTR(tx_start),
	EVENT_PTR(tx_commit),
	EVENT_PTR(tx_abort),
	EVENT_PTR(tx_capacity),
	EVENT_PTR(tx_conflict),
	EVENT_PTR(el_start),
	EVENT_PTR(el_commit),
	EVENT_PTR(el_abort),
	EVENT_PTR(el_capacity),
	EVENT_PTR(el_conflict),
	EVENT_PTR(cycles_t),
	EVENT_PTR(cycles_ct),
	EVENT_PTR(mem_ld_hsw),
	EVENT_PTR(mem_st_hsw),
	NULL
};
__init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	struct event_constraint *c;
	unsigned int unused;
	struct extra_reg *er;
	int version, i;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xb:
			return knc_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl		= ebx.full;
	x86_pmu.events_mask_len		= eax.split.mask_length;

	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	if (boot_cpu_has(X86_FEATURE_PDCM)) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65nm Core "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* 65nm Core2 "Merom"           */
		x86_add_quirk(intel_clovertown_quirk);
	case 22: /* 65nm Core2 "Merom-L"         */
	case 23: /* 45nm Core2 "Penryn"          */
	case 29: /* 45nm Core2 "Dunnington" (MP) */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;
	case 30: /* 45nm Nehalem    */
	case 26: /* 45nm Nehalem-EP */
	case 46: /* 45nm Nehalem-EX */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		x86_add_quirk(intel_nehalem_quirk);

		pr_cont("Nehalem events, ");
		break;
	case 28: /* 45nm Atom "Pineview"   */
	case 38: /* 45nm Atom "Lincroft"   */
	case 39: /* 32nm Atom "Penwell"    */
	case 53: /* 32nm Atom "Cloverview" */
	case 54: /* 32nm Atom "Cedarview"  */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;
	case 55: /* 22nm Atom "Silvermont"                */
	case 76: /* 14nm Atom "Airmont"                   */
	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
			sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_slm_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		pr_cont("Silvermont events, ");
		break;
	case 37: /* 32nm Westmere    */
	case 44: /* 32nm Westmere-EP */
	case 47: /* 32nm Westmere-EX */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		pr_cont("Westmere events, ");
		break;
	case 42: /* 32nm SandyBridge         */
	case 45: /* 32nm SandyBridge-E/EN/EP */
		x86_add_quirk(intel_sandybridge_quirk);
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 45)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;

		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("SandyBridge events, ");
		break;
	case 58: /* 22nm IvyBridge       */
	case 62: /* 22nm IvyBridge-EP/EX */
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		/* dTLB-load-misses on IVB is different than SNB */
		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */

		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 62)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("IvyBridge events, ");
		break;
	case 60: /* 22nm Haswell Core */
	case 63: /* 22nm Haswell Server */
	case 69: /* 22nm Haswell ULT */
	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
		x86_add_quirk(intel_ht_bug);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_hsw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.cpu_events = hsw_events_attrs;
		x86_pmu.lbr_double_abort = true;
		pr_cont("Haswell events, ");
		break;
	case 61: /* 14nm Broadwell Core-M */
	case 86: /* 14nm Broadwell Xeon D */
	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
	case 79: /* 14nm Broadwell Server */
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		/* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
		hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
									 BDW_L3_MISS|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
									  HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
									     BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
									      BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_bdw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.cpu_events = hsw_events_attrs;
		x86_pmu.limit_period = bdw_limit_period;
		pr_cont("Broadwell events, ");
		break;
	case 78: /* 14nm Skylake Mobile  */
	case 94: /* 14nm Skylake Desktop */
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_skl_event_constraints;
		x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
		x86_pmu.extra_regs = intel_skl_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.cpu_events = hsw_events_attrs;
		WARN_ON(!x86_pmu.format_attrs);
		pr_cont("Skylake events, ");
		break;
	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			break;
		}
		break;
	}
	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;

	if (x86_pmu.event_constraints) {
		/*
		 * event on fixed counter2 (REF_CYCLES) only works on this
		 * counter, so do not extend mask to generic counters
		 */
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask == FIXED_EVENT_FLAGS
			    && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			c->idxmsk64 &=
				~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
			c->weight = hweight64(c->idxmsk64);
		}
	}
	/*
	 * Accessing the LBR MSRs may cause #GP under certain circumstances,
	 * e.g. KVM doesn't support LBR MSRs.
	 * Check all LBR MSRs here.
	 * Disable LBR access if any LBR MSR can not be accessed.
	 */
	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}

	/*
	 * Accessing extra MSRs may cause #GP under certain circumstances,
	 * e.g. KVM doesn't support offcore events.
	 * Check all extra_regs here.
	 */
	if (x86_pmu.extra_regs) {
		for (er = x86_pmu.extra_regs; er->msr; er++) {
			er->extra_msr_access = check_msr(er->msr, 0x11UL);
			/* Disable LBR select mapping */
			if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
				x86_pmu.lbr_sel_map = NULL;
		}
	}

	/* Support full width counters using alternative MSR range */
	if (x86_pmu.intel_cap.full_width_write) {
		x86_pmu.max_period = x86_pmu.cntval_mask;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");
	}

	return 0;
}
/*
 * HT bug: phase 2 init
 * Called once we have valid topology information to check
 * whether or not HT is enabled
 * If HT is off, then we disable the workaround
 */
static __init int fixup_ht_bug(void)
{
	int cpu = smp_processor_id();
	int w, c;
	/*
	 * problem not present on this CPU model, nothing to do
	 */
	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
		return 0;

	w = cpumask_weight(topology_sibling_cpumask(cpu));
	if (w > 1) {
		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
		return 0;
	}

	watchdog_nmi_disable_all();

	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);

	x86_pmu.start_scheduling = NULL;
	x86_pmu.commit_scheduling = NULL;
	x86_pmu.stop_scheduling = NULL;

	watchdog_nmi_enable_all();

	get_online_cpus();

	for_each_online_cpu(c) {
		free_excl_cntrs(c);
	}

	put_online_cpus();

	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
	return 0;
}

subsys_initcall(fixup_ht_bug)