perf/x86/intel: Add new cache events table for Haswell
arch/x86/kernel/cpu/perf_event_intel.c
/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/apic.h>

#include "perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
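
/*
 * Editor's note: each entry above is a raw PERFEVTSEL encoding -- the
 * low byte is the event select, the next byte the unit mask. For
 * instance, 0x412e is event 0x2e with umask 0x41 (LLC misses), while
 * 0x4f2e is the same event with umask 0x4f (LLC references).
 */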

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	/*
	 * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
	 * siblings; disable these events because they can corrupt unrelated
	 * counters.
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_snb,	"event=0xcd,umask=0x2");

struct attribute *nhm_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

struct attribute *snb_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};
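
/*
 * Editor's note: these attributes surface the events in sysfs (e.g.
 * under /sys/bus/event_source/devices/cpu/events/), so a tool can
 * request "cpu/mem-loads/" and have it resolve to the encodings above.
 */
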
static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
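
/*
 * Editor's note: e.g. intel_pmu_event_map(PERF_COUNT_HW_CACHE_MISSES)
 * returns 0x412e, which the generic x86 code then programs into a
 * PERFEVTSEL MSR.
 */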

#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)

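/*
 * Editor's note: the request and response masks above are OR-ed
 * together to form an MSR_OFFCORE_RSP_x value. For example, the LL
 * read-miss entry below is SNB_DMND_READ|SNB_L3_MISS: demand data
 * reads satisfied by local or remote DRAM, or by a non-DRAM target.
 */
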
static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};
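
/*
 * Editor's note: in the event-ids tables below, 0x01b7 stands for
 * OFFCORE_RESPONSE -- the counter is programmed with event 0xb7,
 * umask 0x01, and the detailed request/response mask (from the table
 * above) is written to MSR_OFFCORE_RSP_x via the extra_regs machinery.
 */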

static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliably counted.
 */

#define HSW_DEMAND_DATA_RD		BIT_ULL(0)
#define HSW_DEMAND_RFO			BIT_ULL(1)
#define HSW_ANY_RESPONSE		BIT_ULL(16)
#define HSW_SUPPLIER_NONE		BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM		BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0		BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1		BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P	BIT_ULL(29)
#define HSW_L3_MISS			(HSW_L3_MISS_LOCAL_DRAM| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE			BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define HSW_SNOOP_MISS			BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define HSW_SNOOP_HITM			BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM		BIT_ULL(37)
#define HSW_ANY_SNOOP			(HSW_SNOOP_NONE| \
					 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
					 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
					 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM			(HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ			HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE		HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE		(HSW_L3_MISS_REMOTE_HOP0|\
					 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS			HSW_ANY_RESPONSE

static __initconst const u64 hsw_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x280,	/* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x108,	/* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x149,	/* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x6085,	/* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x185,	/* ITLB_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};

static __initconst const u64 hsw_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
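
/*
 * Editor's note: as a worked example, NHM_DMND_READ|NHM_L3_MISS counts
 * demand data reads serviced by DRAM (local or remote), a remote cache,
 * or a non-DRAM target -- i.e. anything that missed the local L3 --
 * which is exactly the LL read-miss entry in the table below.
 */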

static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412a, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412a, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1),
	EVENT_EXTRA_END
};
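
/*
 * Editor's note: unlike the big cores, Silvermont's second offcore
 * event is 0x02b7 (event 0xb7, umask 0x2) rather than 0x01bb.
 */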

#define SLM_DMND_READ		SNB_DMND_DATA_RD
#define SLM_DMND_WRITE		SNB_DMND_RFO
#define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS		SNB_RESP_ANY
#define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)

static __initconst const u64 slm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_READ|SLM_LLC_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
	},
 },
};

static __initconst const u64 slm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0x0104, /* LD_DCU_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0x0804, /* LD_DTLB_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The Errata requires the below steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we take are a little different from the above:
	 * A) To reduce MSR operations, we don't run step 1) as the MSRs
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with the magic number;
	 * C) For step 5), we only clear when the PERFEVTSELx is
	 *    not currently used;
	 * D) Call x86_perf_event_set_period to restore PMCx.
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}

static inline bool event_is_checkpointed(struct perf_event *event)
{
	return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
}

static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
	cpuc->intel_cp_status &= ~(1ull << hwc->idx);

	/*
	 * LBR must be disabled before any actual event,
	 * because any event may be combined with LBR.
	 */
	if (needs_branch_stack(event))
		intel_pmu_lbr_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}

static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
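
/*
 * Editor's note, worked example: for fixed counter 1
 * (CPU_CLK_UNHALTED.CORE) counting user+kernel with PMI on overflow,
 * bits = 0x8|0x2|0x1 = 0xb, which the shift above places into
 * ctrl_val[7:4] of MSR_ARCH_PERFMON_FIXED_CTR_CTRL.
 */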

static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}
	/*
	 * LBR must be enabled before any actual event,
	 * because any event may be combined with LBR.
	 */
	if (needs_branch_stack(event))
		intel_pmu_lbr_enable(event);

	if (event->attr.exclude_host)
		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
	if (event->attr.exclude_guest)
		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

	if (unlikely(event_is_checkpointed(event)))
		cpuc->intel_cp_status |= (1ull << hwc->idx);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	/*
	 * For a checkpointed counter always reset back to 0. This
	 * avoids a situation where the counter overflows, aborts the
	 * transaction and is then set back to shortly before the
	 * overflow, and overflows and aborts again.
	 */
	if (unlikely(event_is_checkpointed(event))) {
		/* No race with NMIs because the counter should not be armed */
		wrmsrl(event->hw.event_base, 0);
		local64_set(&event->hw.prev_count, 0);
	}
	return x86_perf_event_set_period(event);
}

static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * No known reason to not always do late ACK,
	 * but just in case do it opt-in.
	 */
	if (!x86_pmu.late_ack)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status)
		goto done;

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		static bool warned = false;
		if (!warned) {
			WARN(1, "perfevents: irq loop stuck!\n");
			perf_event_print_debug();
			warned = true;
		}
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * CondChgd bit 63 doesn't mean any overflow status. Ignore
	 * and clear the bit.
	 */
	if (__test_and_clear_bit(63, (unsigned long *)&status)) {
		if (!status)
			goto done;
	}

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	/*
	 * Checkpointed counters can lead to 'spurious' PMIs because the
	 * rollback caused by the PMI will have cleared the overflow status
	 * bit. Therefore always force probe these counters.
	 */
	status |= cpuc->intel_cp_status;

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	/*
	 * Only unmask the NMI after the overflow counters
	 * have been reset. This avoids spurious NMIs on
	 * Haswell CPUs.
	 */
	if (x86_pmu.late_ack)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	return handled;
}

static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}

static int intel_alt_er(int idx)
{
	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
		return idx;

	if (idx == EXTRA_REG_RSP_0)
		return EXTRA_REG_RSP_1;

	if (idx == EXTRA_REG_RSP_1)
		return EXTRA_REG_RSP_0;

	return idx;
}

static void intel_fixup_er(struct perf_event *event, int idx)
{
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}
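
/*
 * Editor's note: e.g. an event that asked for OFFCORE_RSP_0 but found
 * it claimed with a different config can be moved to EXTRA_REG_RSP_1;
 * its config is then rewritten from the RSP_0 event to the RSP_1 event
 * and its extra_reg retargeted to MSR_OFFCORE_RSP_1.
 */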
1669
efc9f05d
SE
1670/*
1671 * manage allocation of shared extra msr for certain events
1672 *
1673 * sharing can be:
1674 * per-cpu: to be shared between the various events on a single PMU
1675 * per-core: per-cpu + shared by HT threads
1676 */
a7e3ed1e 1677static struct event_constraint *
efc9f05d 1678__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
b36817e8
SE
1679 struct perf_event *event,
1680 struct hw_perf_event_extra *reg)
a7e3ed1e 1681{
efc9f05d 1682 struct event_constraint *c = &emptyconstraint;
a7e3ed1e 1683 struct er_account *era;
cd8a38d3 1684 unsigned long flags;
5a425294 1685 int idx = reg->idx;
a7e3ed1e 1686
5a425294
PZ
1687 /*
1688 * reg->alloc can be set due to existing state, so for fake cpuc we
1689 * need to ignore this, otherwise we might fail to allocate proper fake
1690 * state for this extra reg constraint. Also see the comment below.
1691 */
1692 if (reg->alloc && !cpuc->is_fake)
b36817e8 1693 return NULL; /* call x86_get_event_constraint() */
a7e3ed1e 1694
b79e8941 1695again:
5a425294 1696 era = &cpuc->shared_regs->regs[idx];
cd8a38d3
SE
1697 /*
1698 * we use spin_lock_irqsave() to avoid lockdep issues when
1699 * passing a fake cpuc
1700 */
1701 raw_spin_lock_irqsave(&era->lock, flags);
efc9f05d
SE
1702
1703 if (!atomic_read(&era->ref) || era->config == reg->config) {
1704
5a425294
PZ
1705 /*
1706 * If its a fake cpuc -- as per validate_{group,event}() we
1707 * shouldn't touch event state and we can avoid doing so
1708 * since both will only call get_event_constraints() once
1709 * on each event, this avoids the need for reg->alloc.
1710 *
1711 * Not doing the ER fixup will only result in era->reg being
1712 * wrong, but since we won't actually try and program hardware
1713 * this isn't a problem either.
1714 */
1715 if (!cpuc->is_fake) {
1716 if (idx != reg->idx)
1717 intel_fixup_er(event, idx);
1718
1719 /*
1720 * x86_schedule_events() can call get_event_constraints()
1721 * multiple times on events in the case of incremental
 1722 * scheduling. reg->alloc ensures we only do the ER
1723 * allocation once.
1724 */
1725 reg->alloc = 1;
1726 }
1727
efc9f05d
SE
1728 /* lock in msr value */
1729 era->config = reg->config;
1730 era->reg = reg->reg;
1731
1732 /* one more user */
1733 atomic_inc(&era->ref);
1734
a7e3ed1e 1735 /*
b36817e8
SE
 1736 * need to call x86_get_event_constraints()
1737 * to check if associated event has constraints
a7e3ed1e 1738 */
b36817e8 1739 c = NULL;
5a425294
PZ
1740 } else {
1741 idx = intel_alt_er(idx);
1742 if (idx != reg->idx) {
1743 raw_spin_unlock_irqrestore(&era->lock, flags);
1744 goto again;
1745 }
a7e3ed1e 1746 }
cd8a38d3 1747 raw_spin_unlock_irqrestore(&era->lock, flags);
a7e3ed1e 1748
efc9f05d
SE
1749 return c;
1750}
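/*
 * Illustrative flow for the function above (a sketch): event A grabs
 * EXTRA_REG_RSP_0 with config X; event B then asks for RSP_0 with a
 * different config Y. The era->config test fails, intel_alt_er() yields
 * EXTRA_REG_RSP_1, and the "again" loop retries there. Only when every
 * candidate register is taken with an incompatible config does the
 * function return &emptyconstraint and the event fails to schedule.
 */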
1751
1752static void
1753__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
1754 struct hw_perf_event_extra *reg)
1755{
1756 struct er_account *era;
1757
1758 /*
5a425294
PZ
1759 * Only put constraint if extra reg was actually allocated. Also takes
 1760 * care of events which do not use an extra shared reg.
1761 *
1762 * Also, if this is a fake cpuc we shouldn't touch any event state
1763 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
1764 * either since it'll be thrown out.
efc9f05d 1765 */
5a425294 1766 if (!reg->alloc || cpuc->is_fake)
efc9f05d
SE
1767 return;
1768
1769 era = &cpuc->shared_regs->regs[reg->idx];
1770
1771 /* one fewer user */
1772 atomic_dec(&era->ref);
1773
1774 /* allocate again next time */
1775 reg->alloc = 0;
1776}
1777
1778static struct event_constraint *
1779intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
1780 struct perf_event *event)
1781{
b36817e8
SE
1782 struct event_constraint *c = NULL, *d;
1783 struct hw_perf_event_extra *xreg, *breg;
1784
1785 xreg = &event->hw.extra_reg;
1786 if (xreg->idx != EXTRA_REG_NONE) {
1787 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
1788 if (c == &emptyconstraint)
1789 return c;
1790 }
1791 breg = &event->hw.branch_reg;
1792 if (breg->idx != EXTRA_REG_NONE) {
1793 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
1794 if (d == &emptyconstraint) {
1795 __intel_shared_reg_put_constraints(cpuc, xreg);
1796 c = d;
1797 }
1798 }
efc9f05d 1799 return c;
a7e3ed1e
AK
1800}
1801
de0428a7
KW
1802struct event_constraint *
1803x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1804{
1805 struct event_constraint *c;
1806
1807 if (x86_pmu.event_constraints) {
1808 for_each_event_constraint(c, x86_pmu.event_constraints) {
9fac2cf3 1809 if ((event->hw.config & c->cmask) == c->code) {
9fac2cf3 1810 event->hw.flags |= c->flags;
de0428a7 1811 return c;
9fac2cf3 1812 }
de0428a7
KW
1813 }
1814 }
1815
1816 return &unconstrained;
1817}
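/*
 * Example of the table walk above (a sketch, using the Core constraints
 * installed later in intel_pmu_init()): INTEL_EVENT_CONSTRAINT(0x12, 0x2)
 * has cmask == ARCH_PERFMON_EVENTSEL_EVENT, so a MUL event (event select
 * 0x12, any umask) matches and is returned with idxmsk64 == 0x2, i.e. the
 * scheduler may place it only on generic counter 1. Events with no table
 * entry fall through to the "unconstrained" catch-all.
 */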
1818
f22f54f4
PZ
1819static struct event_constraint *
1820intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1821{
1822 struct event_constraint *c;
1823
ca037701
PZ
1824 c = intel_bts_constraints(event);
1825 if (c)
1826 return c;
1827
1828 c = intel_pebs_constraints(event);
f22f54f4
PZ
1829 if (c)
1830 return c;
1831
efc9f05d 1832 c = intel_shared_regs_constraints(cpuc, event);
a7e3ed1e
AK
1833 if (c)
1834 return c;
1835
f22f54f4
PZ
1836 return x86_get_event_constraints(cpuc, event);
1837}
1838
efc9f05d
SE
1839static void
1840intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
a7e3ed1e
AK
1841 struct perf_event *event)
1842{
efc9f05d 1843 struct hw_perf_event_extra *reg;
a7e3ed1e 1844
efc9f05d
SE
1845 reg = &event->hw.extra_reg;
1846 if (reg->idx != EXTRA_REG_NONE)
1847 __intel_shared_reg_put_constraints(cpuc, reg);
b36817e8
SE
1848
1849 reg = &event->hw.branch_reg;
1850 if (reg->idx != EXTRA_REG_NONE)
1851 __intel_shared_reg_put_constraints(cpuc, reg);
efc9f05d 1852}
a7e3ed1e 1853
efc9f05d
SE
1854static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
1855 struct perf_event *event)
1856{
1857 intel_put_shared_regs_event_constraints(cpuc, event);
a7e3ed1e
AK
1858}
1859
0780c927 1860static void intel_pebs_aliases_core2(struct perf_event *event)
b4cdc5c2 1861{
0780c927 1862 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
7639dae0
PZ
1863 /*
1864 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1865 * (0x003c) so that we can use it with PEBS.
1866 *
1867 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1868 * PEBS capable. However we can use INST_RETIRED.ANY_P
1869 * (0x00c0), which is a PEBS capable event, to get the same
1870 * count.
1871 *
 1872 * INST_RETIRED.ANY_P counts the number of cycles that retire
1873 * CNTMASK instructions. By setting CNTMASK to a value (16)
1874 * larger than the maximum number of instructions that can be
1875 * retired per cycle (4) and then inverting the condition, we
 1876 * count all cycles that retire 16 or fewer instructions, which
1877 * is every cycle.
1878 *
1879 * Thereby we gain a PEBS capable cycle counter.
1880 */
f9b4eeb8
PZ
1881 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
1882
0780c927
PZ
1883 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1884 event->hw.config = alt_config;
1885 }
1886}
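/*
 * Worked encoding for the alias above (assuming the architectural perfmon
 * layout: event select in bits 0-7, inv in bit 23, cmask in bits 24-31):
 *
 *   X86_CONFIG(.event=0xc0, .inv=1, .cmask=16)
 *     == 0xc0 | (1 << 23) | (16 << 24) == 0x108000c0
 *
 * which is the INST_RETIRED.ANY_P based cycle proxy described above.
 */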
1887
1888static void intel_pebs_aliases_snb(struct perf_event *event)
1889{
1890 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1891 /*
1892 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1893 * (0x003c) so that we can use it with PEBS.
1894 *
1895 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1896 * PEBS capable. However we can use UOPS_RETIRED.ALL
1897 * (0x01c2), which is a PEBS capable event, to get the same
1898 * count.
1899 *
 1900 * UOPS_RETIRED.ALL counts the number of cycles that retire
1901 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
1902 * larger than the maximum number of micro-ops that can be
1903 * retired per cycle (4) and then inverting the condition, we
 1904 * count all cycles that retire 16 or fewer micro-ops, which
1905 * is every cycle.
1906 *
1907 * Thereby we gain a PEBS capable cycle counter.
1908 */
1909 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
7639dae0
PZ
1910
1911 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1912 event->hw.config = alt_config;
1913 }
0780c927
PZ
1914}
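/*
 * The SNB alias encodes analogously (a sketch; umask lives in bits 8-15):
 *
 *   X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16)
 *     == 0x1c2 | (1 << 23) | (16 << 24) == 0x108001c2
 */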
1915
1916static int intel_pmu_hw_config(struct perf_event *event)
1917{
1918 int ret = x86_pmu_hw_config(event);
1919
1920 if (ret)
1921 return ret;
1922
1923 if (event->attr.precise_ip && x86_pmu.pebs_aliases)
1924 x86_pmu.pebs_aliases(event);
7639dae0 1925
a46a2300 1926 if (needs_branch_stack(event)) {
60ce0fbd
SE
1927 ret = intel_pmu_setup_lbr_filter(event);
1928 if (ret)
1929 return ret;
1930 }
1931
b4cdc5c2
PZ
1932 if (event->attr.type != PERF_TYPE_RAW)
1933 return 0;
1934
1935 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
1936 return 0;
1937
1938 if (x86_pmu.version < 3)
1939 return -EINVAL;
1940
1941 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1942 return -EACCES;
1943
1944 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
1945
1946 return 0;
1947}
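/*
 * Example of the ANY-bit gating above (a sketch; the "any" format
 * attribute defined below maps to config bit 21):
 *
 *   perf stat -e cpu/event=0x3c,any=1/ -a sleep 1
 *
 * is accepted only on a version 3+ PMU, and only with CAP_SYS_ADMIN when
 * perf_paranoid_cpu() is in effect; otherwise the checks above return
 * -EINVAL or -EACCES respectively.
 */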
1948
144d31e6
GN
1949struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
1950{
1951 if (x86_pmu.guest_get_msrs)
1952 return x86_pmu.guest_get_msrs(nr);
1953 *nr = 0;
1954 return NULL;
1955}
1956EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
1957
1958static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
1959{
89cbc767 1960 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
144d31e6
GN
1961 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1962
1963 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
1964 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
1965 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
26a4f3c0
GN
1966 /*
 1967 * If a PMU counter has PEBS enabled, it is not enough to disable the
 1968 * counter on a guest entry, since a PEBS memory write can overshoot the
 1969 * guest entry and corrupt guest memory. Disabling PEBS solves the problem.
1970 */
1971 arr[1].msr = MSR_IA32_PEBS_ENABLE;
1972 arr[1].host = cpuc->pebs_enabled;
1973 arr[1].guest = 0;
144d31e6 1974
26a4f3c0 1975 *nr = 2;
144d31e6
GN
1976 return arr;
1977}
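/*
 * Consumption sketch: a hypervisor (e.g. KVM's VMX code) calls
 * perf_guest_get_msrs() and programs the returned (msr, host, guest)
 * triples into its MSR switch lists, so GLOBAL_CTRL and PEBS_ENABLE are
 * swapped atomically on VM-entry/VM-exit. The guest value masks out the
 * counters that must not run in the guest and disables PEBS entirely,
 * per the comment above.
 */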
1978
1979static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
1980{
89cbc767 1981 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
144d31e6
GN
1982 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1983 int idx;
1984
1985 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1986 struct perf_event *event = cpuc->events[idx];
1987
1988 arr[idx].msr = x86_pmu_config_addr(idx);
1989 arr[idx].host = arr[idx].guest = 0;
1990
1991 if (!test_bit(idx, cpuc->active_mask))
1992 continue;
1993
1994 arr[idx].host = arr[idx].guest =
1995 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
1996
1997 if (event->attr.exclude_host)
1998 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1999 else if (event->attr.exclude_guest)
2000 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
2001 }
2002
2003 *nr = x86_pmu.num_counters;
2004 return arr;
2005}
2006
2007static void core_pmu_enable_event(struct perf_event *event)
2008{
2009 if (!event->attr.exclude_host)
2010 x86_pmu_enable_event(event);
2011}
2012
2013static void core_pmu_enable_all(int added)
2014{
89cbc767 2015 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
144d31e6
GN
2016 int idx;
2017
2018 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2019 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
2020
2021 if (!test_bit(idx, cpuc->active_mask) ||
2022 cpuc->events[idx]->attr.exclude_host)
2023 continue;
2024
2025 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2026 }
2027}
2028
3a632cb2
AK
2029static int hsw_hw_config(struct perf_event *event)
2030{
2031 int ret = intel_pmu_hw_config(event);
2032
2033 if (ret)
2034 return ret;
2035 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
2036 return 0;
2037 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
2038
2039 /*
2040 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
 2041 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
2042 * this combination.
2043 */
2044 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
2045 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
2046 event->attr.precise_ip > 0))
2047 return -EOPNOTSUPP;
2048
2dbf0116
AK
2049 if (event_is_checkpointed(event)) {
2050 /*
2051 * Sampling of checkpointed events can cause situations where
 2052 * the CPU constantly aborts because of an overflow, which is
2053 * then checkpointed back and ignored. Forbid checkpointing
2054 * for sampling.
2055 *
2056 * But still allow a long sampling period, so that perf stat
2057 * from KVM works.
2058 */
2059 if (event->attr.sample_period > 0 &&
2060 event->attr.sample_period < 0x7fffffff)
2061 return -EOPNOTSUPP;
2062 }
3a632cb2
AK
2063 return 0;
2064}
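/*
 * Worked example for the TX bits above (a sketch; in_tx is config bit 32
 * and in_tx_cp bit 33, matching the format attributes below): the Haswell
 * "cycles-t" alias, event=0x3c,in_tx=1, yields
 *
 *   hw.config == 0x3c | (1ULL << 32) == 0x10000003c
 *
 * i.e. unhalted cycles counted only while inside a TSX transaction.
 */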
2065
2066static struct event_constraint counter2_constraint =
2067 EVENT_CONSTRAINT(0, 0x4, 0);
2068
2069static struct event_constraint *
2070hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2071{
2072 struct event_constraint *c = intel_get_event_constraints(cpuc, event);
2073
 2074 /* Handle the special quirk: in_tx_checkpointed only works on counter 2 */
2075 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
2076 if (c->idxmsk64 & (1U << 2))
2077 return &counter2_constraint;
2078 return &emptyconstraint;
2079 }
2080
2081 return c;
2082}
2083
641cc938
JO
2084PMU_FORMAT_ATTR(event, "config:0-7" );
2085PMU_FORMAT_ATTR(umask, "config:8-15" );
2086PMU_FORMAT_ATTR(edge, "config:18" );
2087PMU_FORMAT_ATTR(pc, "config:19" );
2088PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
2089PMU_FORMAT_ATTR(inv, "config:23" );
2090PMU_FORMAT_ATTR(cmask, "config:24-31" );
3a632cb2
AK
2091PMU_FORMAT_ATTR(in_tx, "config:32");
2092PMU_FORMAT_ATTR(in_tx_cp, "config:33");
641cc938
JO
2093
2094static struct attribute *intel_arch_formats_attr[] = {
2095 &format_attr_event.attr,
2096 &format_attr_umask.attr,
2097 &format_attr_edge.attr,
2098 &format_attr_pc.attr,
2099 &format_attr_inv.attr,
2100 &format_attr_cmask.attr,
2101 NULL,
2102};
2103
0bf79d44
JO
2104ssize_t intel_event_sysfs_show(char *page, u64 config)
2105{
2106 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
2107
2108 return x86_event_sysfs_show(page, config, event);
2109}
2110
caaa8be3 2111static __initconst const struct x86_pmu core_pmu = {
f22f54f4
PZ
2112 .name = "core",
2113 .handle_irq = x86_pmu_handle_irq,
2114 .disable_all = x86_pmu_disable_all,
144d31e6
GN
2115 .enable_all = core_pmu_enable_all,
2116 .enable = core_pmu_enable_event,
f22f54f4 2117 .disable = x86_pmu_disable_event,
b4cdc5c2 2118 .hw_config = x86_pmu_hw_config,
a072738e 2119 .schedule_events = x86_schedule_events,
f22f54f4
PZ
2120 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2121 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
2122 .event_map = intel_pmu_event_map,
f22f54f4
PZ
2123 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
2124 .apic = 1,
2125 /*
2126 * Intel PMCs cannot be accessed sanely above 32 bit width,
2127 * so we install an artificial 1<<31 period regardless of
2128 * the generic event period:
2129 */
2130 .max_period = (1ULL << 31) - 1,
2131 .get_event_constraints = intel_get_event_constraints,
a7e3ed1e 2132 .put_event_constraints = intel_put_event_constraints,
f22f54f4 2133 .event_constraints = intel_core_event_constraints,
144d31e6 2134 .guest_get_msrs = core_guest_get_msrs,
641cc938 2135 .format_attrs = intel_arch_formats_attr,
0bf79d44 2136 .events_sysfs_show = intel_event_sysfs_show,
f22f54f4
PZ
2137};
2138
de0428a7 2139struct intel_shared_regs *allocate_shared_regs(int cpu)
efc9f05d
SE
2140{
2141 struct intel_shared_regs *regs;
2142 int i;
2143
2144 regs = kzalloc_node(sizeof(struct intel_shared_regs),
2145 GFP_KERNEL, cpu_to_node(cpu));
2146 if (regs) {
2147 /*
2148 * initialize the locks to keep lockdep happy
2149 */
2150 for (i = 0; i < EXTRA_REG_MAX; i++)
2151 raw_spin_lock_init(&regs->regs[i].lock);
2152
2153 regs->core_id = -1;
2154 }
2155 return regs;
2156}
2157
a7e3ed1e
AK
2158static int intel_pmu_cpu_prepare(int cpu)
2159{
2160 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2161
b36817e8 2162 if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
69092624
LM
2163 return NOTIFY_OK;
2164
efc9f05d
SE
2165 cpuc->shared_regs = allocate_shared_regs(cpu);
2166 if (!cpuc->shared_regs)
a7e3ed1e
AK
2167 return NOTIFY_BAD;
2168
a7e3ed1e
AK
2169 return NOTIFY_OK;
2170}
2171
74846d35
PZ
2172static void intel_pmu_cpu_starting(int cpu)
2173{
a7e3ed1e
AK
2174 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2175 int core_id = topology_core_id(cpu);
2176 int i;
2177
69092624
LM
2178 init_debug_store_on_cpu(cpu);
2179 /*
2180 * Deal with CPUs that don't clear their LBRs on power-up.
2181 */
2182 intel_pmu_lbr_reset();
2183
b36817e8
SE
2184 cpuc->lbr_sel = NULL;
2185
2186 if (!cpuc->shared_regs)
69092624
LM
2187 return;
2188
b36817e8
SE
2189 if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
2190 for_each_cpu(i, topology_thread_cpumask(cpu)) {
2191 struct intel_shared_regs *pc;
a7e3ed1e 2192
b36817e8
SE
2193 pc = per_cpu(cpu_hw_events, i).shared_regs;
2194 if (pc && pc->core_id == core_id) {
2195 cpuc->kfree_on_online = cpuc->shared_regs;
2196 cpuc->shared_regs = pc;
2197 break;
2198 }
a7e3ed1e 2199 }
b36817e8
SE
2200 cpuc->shared_regs->core_id = core_id;
2201 cpuc->shared_regs->refcnt++;
a7e3ed1e
AK
2202 }
2203
b36817e8
SE
2204 if (x86_pmu.lbr_sel_map)
2205 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
74846d35
PZ
2206}
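/*
 * Sharing sketch for the loop above: the first HT sibling of a core to
 * come online keeps its own shared_regs and stamps it with the core id;
 * a later sibling finds that instance, adopts it, and queues its own
 * allocation on kfree_on_online, so both threads end up serializing on
 * the same er_account locks.
 */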
2207
2208static void intel_pmu_cpu_dying(int cpu)
2209{
a7e3ed1e 2210 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
efc9f05d 2211 struct intel_shared_regs *pc;
a7e3ed1e 2212
efc9f05d 2213 pc = cpuc->shared_regs;
a7e3ed1e
AK
2214 if (pc) {
2215 if (pc->core_id == -1 || --pc->refcnt == 0)
2216 kfree(pc);
efc9f05d 2217 cpuc->shared_regs = NULL;
a7e3ed1e
AK
2218 }
2219
74846d35
PZ
2220 fini_debug_store_on_cpu(cpu);
2221}
2222
641cc938
JO
2223PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
2224
a63fcab4
SE
2225PMU_FORMAT_ATTR(ldlat, "config1:0-15");
2226
641cc938
JO
2227static struct attribute *intel_arch3_formats_attr[] = {
2228 &format_attr_event.attr,
2229 &format_attr_umask.attr,
2230 &format_attr_edge.attr,
2231 &format_attr_pc.attr,
2232 &format_attr_any.attr,
2233 &format_attr_inv.attr,
2234 &format_attr_cmask.attr,
3a632cb2
AK
2235 &format_attr_in_tx.attr,
2236 &format_attr_in_tx_cp.attr,
641cc938
JO
2237
2238 &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
a63fcab4 2239 &format_attr_ldlat.attr, /* PEBS load latency */
641cc938
JO
2240 NULL,
2241};
2242
caaa8be3 2243static __initconst const struct x86_pmu intel_pmu = {
f22f54f4
PZ
2244 .name = "Intel",
2245 .handle_irq = intel_pmu_handle_irq,
2246 .disable_all = intel_pmu_disable_all,
2247 .enable_all = intel_pmu_enable_all,
2248 .enable = intel_pmu_enable_event,
2249 .disable = intel_pmu_disable_event,
b4cdc5c2 2250 .hw_config = intel_pmu_hw_config,
a072738e 2251 .schedule_events = x86_schedule_events,
f22f54f4
PZ
2252 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2253 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
2254 .event_map = intel_pmu_event_map,
f22f54f4
PZ
2255 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
2256 .apic = 1,
2257 /*
2258 * Intel PMCs cannot be accessed sanely above 32 bit width,
2259 * so we install an artificial 1<<31 period regardless of
2260 * the generic event period:
2261 */
2262 .max_period = (1ULL << 31) - 1,
3f6da390 2263 .get_event_constraints = intel_get_event_constraints,
a7e3ed1e 2264 .put_event_constraints = intel_put_event_constraints,
0780c927 2265 .pebs_aliases = intel_pebs_aliases_core2,
3f6da390 2266
641cc938 2267 .format_attrs = intel_arch3_formats_attr,
0bf79d44 2268 .events_sysfs_show = intel_event_sysfs_show,
641cc938 2269
a7e3ed1e 2270 .cpu_prepare = intel_pmu_cpu_prepare,
74846d35
PZ
2271 .cpu_starting = intel_pmu_cpu_starting,
2272 .cpu_dying = intel_pmu_cpu_dying,
144d31e6 2273 .guest_get_msrs = intel_guest_get_msrs,
2a0ad3b3 2274 .sched_task = intel_pmu_lbr_sched_task,
f22f54f4
PZ
2275};
2276
c1d6f42f 2277static __init void intel_clovertown_quirk(void)
3c44780b
PZ
2278{
2279 /*
2280 * PEBS is unreliable due to:
2281 *
2282 * AJ67 - PEBS may experience CPL leaks
2283 * AJ68 - PEBS PMI may be delayed by one event
2284 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
2285 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
2286 *
2287 * AJ67 could be worked around by restricting the OS/USR flags.
2288 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
2289 *
2290 * AJ106 could possibly be worked around by not allowing LBR
2291 * usage from PEBS, including the fixup.
2292 * AJ68 could possibly be worked around by always programming
ec75a716 2293 * a pebs_event_reset[0] value and coping with the lost events.
3c44780b
PZ
2294 *
2295 * But taken together it might just make sense to not enable PEBS on
2296 * these chips.
2297 */
c767a54b 2298 pr_warn("PEBS disabled due to CPU errata\n");
3c44780b
PZ
2299 x86_pmu.pebs = 0;
2300 x86_pmu.pebs_constraints = NULL;
2301}
2302
c93dc84c
PZ
2303static int intel_snb_pebs_broken(int cpu)
2304{
2305 u32 rev = UINT_MAX; /* default to broken for unknown models */
2306
2307 switch (cpu_data(cpu).x86_model) {
2308 case 42: /* SNB */
2309 rev = 0x28;
2310 break;
2311
2312 case 45: /* SNB-EP */
2313 switch (cpu_data(cpu).x86_mask) {
2314 case 6: rev = 0x618; break;
2315 case 7: rev = 0x70c; break;
2316 }
2317 }
2318
2319 return (cpu_data(cpu).microcode < rev);
2320}
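/*
 * E.g. a model 42 (SNB) part running microcode older than revision 0x28
 * is reported as broken here; the checker below flips x86_pmu.pebs_broken
 * whenever a microcode update crosses the threshold.
 */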
2321
2322static void intel_snb_check_microcode(void)
2323{
2324 int pebs_broken = 0;
2325 int cpu;
2326
2327 get_online_cpus();
2328 for_each_online_cpu(cpu) {
2329 if ((pebs_broken = intel_snb_pebs_broken(cpu)))
2330 break;
2331 }
2332 put_online_cpus();
2333
2334 if (pebs_broken == x86_pmu.pebs_broken)
2335 return;
2336
2337 /*
 2338 * Serialized by the microcode lock.
2339 */
2340 if (x86_pmu.pebs_broken) {
2341 pr_info("PEBS enabled due to microcode update\n");
2342 x86_pmu.pebs_broken = 0;
2343 } else {
2344 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
2345 x86_pmu.pebs_broken = 1;
2346 }
2347}
2348
338b522c
KL
2349/*
 2350 * Under certain circumstances, accessing certain MSRs may cause #GP.
 2351 * This function tests whether the input MSR can be safely accessed.
2352 */
2353static bool check_msr(unsigned long msr, u64 mask)
2354{
2355 u64 val_old, val_new, val_tmp;
2356
2357 /*
2358 * Read the current value, change it and read it back to see if it
 2359 * matches; this is needed to detect certain hardware emulators
2360 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
2361 */
2362 if (rdmsrl_safe(msr, &val_old))
2363 return false;
2364
2365 /*
2366 * Only change the bits which can be updated by wrmsrl.
2367 */
2368 val_tmp = val_old ^ mask;
2369 if (wrmsrl_safe(msr, val_tmp) ||
2370 rdmsrl_safe(msr, &val_new))
2371 return false;
2372
2373 if (val_new != val_tmp)
2374 return false;
2375
 2376 /* At this point the MSR is known to be safely accessible.
2377 * Restore the old value and return.
2378 */
2379 wrmsrl(msr, val_old);
2380
2381 return true;
2382}
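/*
 * Usage sketch: callers below probe with a narrow mask, e.g.
 * check_msr(x86_pmu.lbr_tos, 0x3UL), so only a couple of low bits are
 * toggled and restored; a false result (as seen under emulators that
 * silently ignore the write) disables the corresponding feature.
 */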
2383
c1d6f42f 2384static __init void intel_sandybridge_quirk(void)
6a600a8b 2385{
c93dc84c
PZ
2386 x86_pmu.check_microcode = intel_snb_check_microcode;
2387 intel_snb_check_microcode();
6a600a8b
PZ
2388}
2389
c1d6f42f
PZ
2390static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
2391 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
2392 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
2393 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
2394 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
2395 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
2396 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
2397 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
ffb871bc
GN
2398};
2399
c1d6f42f
PZ
2400static __init void intel_arch_events_quirk(void)
2401{
2402 int bit;
2403
 2404 /* disable events that CPUID reports as not present */
2405 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
2406 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
c767a54b
JP
2407 pr_warn("CPUID marked event: \'%s\' unavailable\n",
2408 intel_arch_events_map[bit].name);
c1d6f42f
PZ
2409 }
2410}
2411
2412static __init void intel_nehalem_quirk(void)
2413{
2414 union cpuid10_ebx ebx;
2415
2416 ebx.full = x86_pmu.events_maskl;
2417 if (ebx.split.no_branch_misses_retired) {
2418 /*
2419 * Erratum AAJ80 detected, we work it around by using
2420 * the BR_MISP_EXEC.ANY event. This will over-count
2421 * branch-misses, but it's still much better than the
2422 * architectural event which is often completely bogus:
2423 */
2424 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
2425 ebx.split.no_branch_misses_retired = 0;
2426 x86_pmu.events_maskl = ebx.full;
c767a54b 2427 pr_info("CPU erratum AAJ80 worked around\n");
c1d6f42f
PZ
2428 }
2429}
2430
7f2ee91f
IM
2431EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
2432EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
f9134f36 2433
4b2c4f1f 2434/* Haswell special events */
7f2ee91f
IM
2435EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
2436EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
2437EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
2438EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
2439EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
2440EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
2441EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
2442EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
2443EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
2444EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
2445EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
2446EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
4b2c4f1f 2447
f9134f36 2448static struct attribute *hsw_events_attrs[] = {
4b2c4f1f
AK
2449 EVENT_PTR(tx_start),
2450 EVENT_PTR(tx_commit),
2451 EVENT_PTR(tx_abort),
2452 EVENT_PTR(tx_capacity),
2453 EVENT_PTR(tx_conflict),
2454 EVENT_PTR(el_start),
2455 EVENT_PTR(el_commit),
2456 EVENT_PTR(el_abort),
2457 EVENT_PTR(el_capacity),
2458 EVENT_PTR(el_conflict),
2459 EVENT_PTR(cycles_t),
2460 EVENT_PTR(cycles_ct),
f9134f36
AK
2461 EVENT_PTR(mem_ld_hsw),
2462 EVENT_PTR(mem_st_hsw),
2463 NULL
2464};
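/*
 * These attributes surface as files under
 * /sys/bus/event_source/devices/cpu/events/ (a sketch of typical use):
 * reading "tx-start" returns "event=0xc9,umask=0x1", and the perf tool
 * resolves "perf stat -e cpu/tx-start/ ..." through the same strings.
 */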
2465
de0428a7 2466__init int intel_pmu_init(void)
f22f54f4
PZ
2467{
2468 union cpuid10_edx edx;
2469 union cpuid10_eax eax;
ffb871bc 2470 union cpuid10_ebx ebx;
a1eac7ac 2471 struct event_constraint *c;
f22f54f4 2472 unsigned int unused;
338b522c
KL
2473 struct extra_reg *er;
2474 int version, i;
f22f54f4
PZ
2475
2476 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
a072738e
CG
2477 switch (boot_cpu_data.x86) {
2478 case 0x6:
2479 return p6_pmu_init();
e717bf4e
VW
2480 case 0xb:
2481 return knc_pmu_init();
a072738e
CG
2482 case 0xf:
2483 return p4_pmu_init();
2484 }
f22f54f4 2485 return -ENODEV;
f22f54f4
PZ
2486 }
2487
2488 /*
2489 * Check whether the Architectural PerfMon supports
 2490 * the Branch Misses Retired hw_event.
2491 */
ffb871bc
GN
2492 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
2493 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
f22f54f4
PZ
2494 return -ENODEV;
2495
2496 version = eax.split.version_id;
2497 if (version < 2)
2498 x86_pmu = core_pmu;
2499 else
2500 x86_pmu = intel_pmu;
2501
2502 x86_pmu.version = version;
948b1bb8
RR
2503 x86_pmu.num_counters = eax.split.num_counters;
2504 x86_pmu.cntval_bits = eax.split.bit_width;
2505 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
f22f54f4 2506
c1d6f42f
PZ
2507 x86_pmu.events_maskl = ebx.full;
2508 x86_pmu.events_mask_len = eax.split.mask_length;
2509
70ab7003
AK
2510 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
2511
f22f54f4
PZ
2512 /*
2513 * Quirk: v2 perfmon does not report fixed-purpose events, so
2514 * assume at least 3 events:
2515 */
2516 if (version > 1)
948b1bb8 2517 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
f22f54f4 2518
c9b08884 2519 if (boot_cpu_has(X86_FEATURE_PDCM)) {
8db909a7
PZ
2520 u64 capabilities;
2521
2522 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
2523 x86_pmu.intel_cap.capabilities = capabilities;
2524 }
2525
ca037701
PZ
2526 intel_ds_init();
2527
c1d6f42f
PZ
2528 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
2529
f22f54f4
PZ
2530 /*
2531 * Install the hw-cache-events table:
2532 */
2533 switch (boot_cpu_data.x86_model) {
0f7c29ce 2534 case 14: /* 65nm Core "Yonah" */
f22f54f4
PZ
2535 pr_cont("Core events, ");
2536 break;
2537
0f7c29ce 2538 case 15: /* 65nm Core2 "Merom" */
c1d6f42f 2539 x86_add_quirk(intel_clovertown_quirk);
0f7c29ce
PZ
2540 case 22: /* 65nm Core2 "Merom-L" */
2541 case 23: /* 45nm Core2 "Penryn" */
 2542 case 29: /* 45nm Core2 "Dunnington" (MP) */
f22f54f4
PZ
2543 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2544 sizeof(hw_cache_event_ids));
2545
caff2bef
PZ
2546 intel_pmu_lbr_init_core();
2547
f22f54f4 2548 x86_pmu.event_constraints = intel_core2_event_constraints;
17e31629 2549 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
f22f54f4
PZ
2550 pr_cont("Core2 events, ");
2551 break;
2552
0f7c29ce
PZ
2553 case 30: /* 45nm Nehalem */
2554 case 26: /* 45nm Nehalem-EP */
2555 case 46: /* 45nm Nehalem-EX */
f22f54f4
PZ
2556 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2557 sizeof(hw_cache_event_ids));
e994d7d2
AK
2558 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
2559 sizeof(hw_cache_extra_regs));
f22f54f4 2560
caff2bef
PZ
2561 intel_pmu_lbr_init_nhm();
2562
f22f54f4 2563 x86_pmu.event_constraints = intel_nehalem_event_constraints;
17e31629 2564 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
11164cd4 2565 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
a7e3ed1e 2566 x86_pmu.extra_regs = intel_nehalem_extra_regs;
ec75a716 2567
f20093ee
SE
2568 x86_pmu.cpu_events = nhm_events_attrs;
2569
91fc4cc0 2570 /* UOPS_ISSUED.STALLED_CYCLES */
f9b4eeb8
PZ
2571 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2572 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
91fc4cc0 2573 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
f9b4eeb8
PZ
2574 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2575 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
94403f88 2576
c1d6f42f 2577 x86_add_quirk(intel_nehalem_quirk);
ec75a716 2578
11164cd4 2579 pr_cont("Nehalem events, ");
f22f54f4 2580 break;
caff2bef 2581
0f7c29ce
PZ
2582 case 28: /* 45nm Atom "Pineview" */
2583 case 38: /* 45nm Atom "Lincroft" */
2584 case 39: /* 32nm Atom "Penwell" */
2585 case 53: /* 32nm Atom "Cloverview" */
2586 case 54: /* 32nm Atom "Cedarview" */
f22f54f4
PZ
2587 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2588 sizeof(hw_cache_event_ids));
2589
caff2bef
PZ
2590 intel_pmu_lbr_init_atom();
2591
f22f54f4 2592 x86_pmu.event_constraints = intel_gen_event_constraints;
17e31629 2593 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
f22f54f4
PZ
2594 pr_cont("Atom events, ");
2595 break;
2596
0f7c29ce 2597 case 55: /* 22nm Atom "Silvermont" */
ef454cae 2598 case 76: /* 14nm Atom "Airmont" */
0f7c29ce 2599 case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
1fa64180
YZ
2600 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
2601 sizeof(hw_cache_event_ids));
2602 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
2603 sizeof(hw_cache_extra_regs));
2604
2605 intel_pmu_lbr_init_atom();
2606
2607 x86_pmu.event_constraints = intel_slm_event_constraints;
2608 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
2609 x86_pmu.extra_regs = intel_slm_extra_regs;
2610 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2611 pr_cont("Silvermont events, ");
2612 break;
2613
0f7c29ce
PZ
2614 case 37: /* 32nm Westmere */
2615 case 44: /* 32nm Westmere-EP */
2616 case 47: /* 32nm Westmere-EX */
f22f54f4
PZ
2617 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
2618 sizeof(hw_cache_event_ids));
e994d7d2
AK
2619 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
2620 sizeof(hw_cache_extra_regs));
f22f54f4 2621
caff2bef
PZ
2622 intel_pmu_lbr_init_nhm();
2623
f22f54f4 2624 x86_pmu.event_constraints = intel_westmere_event_constraints;
40b91cd1 2625 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
17e31629 2626 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
a7e3ed1e 2627 x86_pmu.extra_regs = intel_westmere_extra_regs;
b79e8941 2628 x86_pmu.er_flags |= ERF_HAS_RSP_1;
30112039 2629
f20093ee
SE
2630 x86_pmu.cpu_events = nhm_events_attrs;
2631
30112039 2632 /* UOPS_ISSUED.STALLED_CYCLES */
f9b4eeb8
PZ
2633 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2634 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
30112039 2635 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
f9b4eeb8
PZ
2636 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2637 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
30112039 2638
f22f54f4
PZ
2639 pr_cont("Westmere events, ");
2640 break;
b622d644 2641
0f7c29ce
PZ
2642 case 42: /* 32nm SandyBridge */
2643 case 45: /* 32nm SandyBridge-E/EN/EP */
47a8863d 2644 x86_add_quirk(intel_sandybridge_quirk);
b06b3d49
LM
2645 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2646 sizeof(hw_cache_event_ids));
74e6543f
YZ
2647 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2648 sizeof(hw_cache_extra_regs));
b06b3d49 2649
c5cc2cd9 2650 intel_pmu_lbr_init_snb();
b06b3d49
LM
2651
2652 x86_pmu.event_constraints = intel_snb_event_constraints;
de0428a7 2653 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
0780c927 2654 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
f1923820
SE
2655 if (boot_cpu_data.x86_model == 45)
2656 x86_pmu.extra_regs = intel_snbep_extra_regs;
2657 else
2658 x86_pmu.extra_regs = intel_snb_extra_regs;
ee89cbc2 2659 /* all extra regs are per-cpu when HT is on */
b79e8941
PZ
2660 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2661 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
e04d1b23 2662
f20093ee
SE
2663 x86_pmu.cpu_events = snb_events_attrs;
2664
e04d1b23 2665 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
f9b4eeb8
PZ
2666 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2667 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
e04d1b23 2668 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
f9b4eeb8
PZ
2669 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2670 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
e04d1b23 2671
b06b3d49
LM
2672 pr_cont("SandyBridge events, ");
2673 break;
0f7c29ce
PZ
2674
2675 case 58: /* 22nm IvyBridge */
2676 case 62: /* 22nm IvyBridge-EP/EX */
20a36e39
SE
2677 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2678 sizeof(hw_cache_event_ids));
1996388e
VW
 2679 /* dTLB-load-misses on IVB is different from SNB */
2680 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
2681
20a36e39
SE
2682 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2683 sizeof(hw_cache_extra_regs));
2684
2685 intel_pmu_lbr_init_snb();
2686
69943182 2687 x86_pmu.event_constraints = intel_ivb_event_constraints;
20a36e39
SE
2688 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
2689 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
f1923820
SE
2690 if (boot_cpu_data.x86_model == 62)
2691 x86_pmu.extra_regs = intel_snbep_extra_regs;
2692 else
2693 x86_pmu.extra_regs = intel_snb_extra_regs;
20a36e39
SE
2694 /* all extra regs are per-cpu when HT is on */
2695 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2696 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2697
f20093ee
SE
2698 x86_pmu.cpu_events = snb_events_attrs;
2699
20a36e39
SE
2700 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2701 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2702 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2703
2704 pr_cont("IvyBridge events, ");
2705 break;
2706
b06b3d49 2707
d86c8eaf
AK
2708 case 60: /* 22nm Haswell Core */
2709 case 63: /* 22nm Haswell Server */
2710 case 69: /* 22nm Haswell ULT */
2711 case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
72db5596 2712 x86_pmu.late_ack = true;
0f1b5ca2
AK
2713 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
2714 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
3a632cb2 2715
e9d7f7cd 2716 intel_pmu_lbr_init_hsw();
3a632cb2
AK
2717
2718 x86_pmu.event_constraints = intel_hsw_event_constraints;
3044318f 2719 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
36bbb2f2 2720 x86_pmu.extra_regs = intel_snbep_extra_regs;
3044318f 2721 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
3a632cb2
AK
2722 /* all extra regs are per-cpu when HT is on */
2723 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2724 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2725
2726 x86_pmu.hw_config = hsw_hw_config;
2727 x86_pmu.get_event_constraints = hsw_get_event_constraints;
f9134f36 2728 x86_pmu.cpu_events = hsw_events_attrs;
b7af41a1 2729 x86_pmu.lbr_double_abort = true;
3a632cb2
AK
2730 pr_cont("Haswell events, ");
2731 break;
2732
f22f54f4 2733 default:
0af3ac1f
AK
2734 switch (x86_pmu.version) {
2735 case 1:
2736 x86_pmu.event_constraints = intel_v1_event_constraints;
2737 pr_cont("generic architected perfmon v1, ");
2738 break;
2739 default:
2740 /*
2741 * default constraints for v2 and up
2742 */
2743 x86_pmu.event_constraints = intel_gen_event_constraints;
2744 pr_cont("generic architected perfmon, ");
2745 break;
2746 }
f22f54f4 2747 }
ffb871bc 2748
a1eac7ac
RR
2749 if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
2750 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2751 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
2752 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
2753 }
2754 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
2755
2756 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
2757 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2758 x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
2759 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
2760 }
2761
2762 x86_pmu.intel_ctrl |=
2763 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
2764
2765 if (x86_pmu.event_constraints) {
2766 /*
2767 * event on fixed counter2 (REF_CYCLES) only works on this
2768 * counter, so do not extend mask to generic counters
2769 */
2770 for_each_event_constraint(c, x86_pmu.event_constraints) {
3a632cb2 2771 if (c->cmask != FIXED_EVENT_FLAGS
a1eac7ac
RR
2772 || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
2773 continue;
2774 }
2775
2776 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
2777 c->weight += x86_pmu.num_counters;
2778 }
2779 }
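/*
 * Worked example of the widening above (a sketch, assuming 4 generic
 * counters): FIXED_EVENT_CONSTRAINT(0x00c0, 0) starts as
 * idxmsk64 == 1ULL << 32 (fixed counter 0 only) and becomes 0x10000000f,
 * letting INST_RETIRED.ANY fall back to any generic counter; the
 * REF_CYCLES constraint is skipped since that event only works on fixed
 * counter 2.
 */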
2780
338b522c
KL
2781 /*
 2782 * Accessing LBR MSRs may cause #GP under certain circumstances.
 2783 * E.g. KVM doesn't support LBR MSRs.
 2784 * Check all LBR MSRs here.
 2785 * Disable LBR access if any LBR MSR cannot be accessed.
2786 */
2787 if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
2788 x86_pmu.lbr_nr = 0;
2789 for (i = 0; i < x86_pmu.lbr_nr; i++) {
2790 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
2791 check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
2792 x86_pmu.lbr_nr = 0;
2793 }
2794
2795 /*
 2796 * Accessing extra MSRs may cause #GP under certain circumstances.
 2797 * E.g. KVM doesn't support offcore events.
2798 * Check all extra_regs here.
2799 */
2800 if (x86_pmu.extra_regs) {
2801 for (er = x86_pmu.extra_regs; er->msr; er++) {
2802 er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
2803 /* Disable LBR select mapping */
2804 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
2805 x86_pmu.lbr_sel_map = NULL;
2806 }
2807 }
2808
069e0c3c
AK
2809 /* Support full width counters using alternative MSR range */
2810 if (x86_pmu.intel_cap.full_width_write) {
2811 x86_pmu.max_period = x86_pmu.cntval_mask;
2812 x86_pmu.perfctr = MSR_IA32_PMC0;
2813 pr_cont("full-width counters, ");
2814 }
2815
f22f54f4
PZ
2816 return 0;
2817}