arch/x86/kernel/cpu/perf_event_intel.c
/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/hardirq.h>
#include <asm/apic.h>

#include "perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	EVENT_EXTRA_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)

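/*
 * The SNB_* bits above build MSR_OFFCORE_RSP_[01] values out of a
 * request-type part, a supplier-info part and a snoop-info part; the
 * composite defines OR them into the encodings used by the LL and NODE
 * entries of the extra-regs table below.
 */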
static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};

static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },

};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)

static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))
		return true;

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		return true;

	return false;
}

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
	       x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The errata requires the steps below:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}

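/*
 * MSR_CORE_PERF_GLOBAL_STATUS carries one overflow bit per counter;
 * writing those bits back to MSR_CORE_PERF_GLOBAL_OVF_CTRL acks them.
 */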
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

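/*
 * Each fixed counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
 * clearing that field stops the counter.
 */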
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);

	/*
	 * The LBR must be disabled before the actual event,
	 * because any event may be combined with LBR.
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}

static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}
	/*
	 * The LBR must be enabled before the actual event,
	 * because any event may be combined with LBR.
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_enable(event);

	if (event->attr.exclude_host)
		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
	if (event->attr.exclude_guest)
		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}

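/*
 * Wipe all programmable and fixed counters (and rewind the BTS buffer)
 * so the PMU is back in a known-empty state; used by the IRQ handler
 * when it detects a stuck overflow loop.
 */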
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler. As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This handler doesn't seem to have any issues with the unmasking
	 * so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}

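/*
 * BTS can only stand in for a branch-instructions event with a fixed
 * sample period of 1; steer exactly that case onto the BTS fake counter.
 */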
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}

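/*
 * On parts where both OFFCORE_RSP MSRs exist (ERF_HAS_RSP_1) the two
 * extra regs are interchangeable, so a conflicting allocation can be
 * retried on the alternate MSR after fixing up the event encoding.
 */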
static int intel_alt_er(int idx)
{
	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
		return idx;

	if (idx == EXTRA_REG_RSP_0)
		return EXTRA_REG_RSP_1;

	if (idx == EXTRA_REG_RSP_1)
		return EXTRA_REG_RSP_0;

	return idx;
}

static void intel_fixup_er(struct perf_event *event, int idx)
{
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01b7;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01bb;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}

/*
 * Manage allocation of the shared extra MSRs for certain events.
 *
 * Sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct perf_event *event,
				   struct hw_perf_event_extra *reg)
{
	struct event_constraint *c = &emptyconstraint;
	struct er_account *era;
	unsigned long flags;
	int idx = reg->idx;

	/*
	 * reg->alloc can be set due to existing state, so for fake cpuc we
	 * need to ignore this, otherwise we might fail to allocate proper fake
	 * state for this extra reg constraint. Also see the comment below.
	 */
	if (reg->alloc && !cpuc->is_fake)
		return NULL; /* call x86_get_event_constraints() */

again:
	era = &cpuc->shared_regs->regs[idx];
	/*
	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	 */
	raw_spin_lock_irqsave(&era->lock, flags);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		/*
		 * If it's a fake cpuc -- as per validate_{group,event}() we
		 * shouldn't touch event state and we can avoid doing so
		 * since both will only call get_event_constraints() once
		 * on each event, this avoids the need for reg->alloc.
		 *
		 * Not doing the ER fixup will only result in era->reg being
		 * wrong, but since we won't actually try and program hardware
		 * this isn't a problem either.
		 */
		if (!cpuc->is_fake) {
			if (idx != reg->idx)
				intel_fixup_er(event, idx);

			/*
			 * x86_schedule_events() can call get_event_constraints()
			 * multiple times on events in the case of incremental
			 * scheduling; reg->alloc ensures we only do the ER
			 * allocation once.
			 */
			reg->alloc = 1;
		}

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		/* one more user */
		atomic_inc(&era->ref);

		/*
		 * need to call x86_get_event_constraints()
		 * to check if associated event has constraints
		 */
		c = NULL;
	} else {
		idx = intel_alt_er(idx);
		if (idx != reg->idx) {
			raw_spin_unlock_irqrestore(&era->lock, flags);
			goto again;
		}
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return c;
}

static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
				   struct hw_perf_event_extra *reg)
{
	struct er_account *era;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * Also takes care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake cpuc we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
	 * either since it'll be thrown out.
	 */
	if (!reg->alloc || cpuc->is_fake)
		return;

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
	reg->alloc = 0;
}

static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	struct event_constraint *c = NULL, *d;
	struct hw_perf_event_extra *xreg, *breg;

	xreg = &event->hw.extra_reg;
	if (xreg->idx != EXTRA_REG_NONE) {
		c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
		if (c == &emptyconstraint)
			return c;
	}
	breg = &event->hw.branch_reg;
	if (breg->idx != EXTRA_REG_NONE) {
		d = __intel_shared_reg_get_constraints(cpuc, event, breg);
		if (d == &emptyconstraint) {
			__intel_shared_reg_put_constraints(cpuc, xreg);
			c = d;
		}
	}
	return c;
}

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &unconstrained;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	c = intel_shared_regs_constraints(cpuc, event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}

static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);

	reg = &event->hw.branch_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);
}

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	intel_put_shared_regs_event_constraints(cpuc, event);
}

static void intel_pebs_aliases_core2(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P with a CNTMASK counts the cycles in
		 * which at least CNTMASK instructions retire. By setting
		 * CNTMASK to a value (16) larger than the maximum number of
		 * instructions that can be retired per cycle (4) and then
		 * inverting the condition, we count all cycles that retire
		 * 16 or fewer instructions, which is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}

static void intel_pebs_aliases_snb(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use UOPS_RETIRED.ALL
		 * (0x01c2), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * UOPS_RETIRED.ALL with a CNTMASK counts the cycles in which
		 * at least CNTMASK micro-ops retire. By setting CNTMASK to a
		 * value (16) larger than the maximum number of micro-ops
		 * that can be retired per cycle (4) and then inverting the
		 * condition, we count all cycles that retire 16 or fewer
		 * micro-ops, which is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}

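/*
 * Translate the generic attr into a hardware config: apply the PEBS
 * encoding aliases, set up the LBR filter when branch sampling is
 * needed, and gate the ANY-thread bit on PMU v3+ and sufficient
 * privilege.
 */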
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
		x86_pmu.pebs_aliases(event);

	if (intel_pmu_needs_lbr_smpl(event)) {
		ret = intel_pmu_setup_lbr_filter(event);
		if (ret)
			return ret;
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}

struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	if (x86_pmu.guest_get_msrs)
		return x86_pmu.guest_get_msrs(nr);
	*nr = 0;
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);

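/*
 * Build the MSR switch list applied around VM entry/exit: global
 * control is split via the guest/host masks, and PEBS is disabled on
 * the guest side so its memory writes cannot overshoot into the guest.
 */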
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
	/*
	 * If PMU counter has PEBS enabled it is not enough to disable counter
	 * on a guest entry since PEBS memory write can overshoot guest entry
	 * and corrupt guest memory. Disabling PEBS solves the problem.
	 */
	arr[1].msr = MSR_IA32_PEBS_ENABLE;
	arr[1].host = cpuc->pebs_enabled;
	arr[1].guest = 0;

	*nr = 2;
	return arr;
}

static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];

		arr[idx].msr = x86_pmu_config_addr(idx);
		arr[idx].host = arr[idx].guest = 0;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		arr[idx].host = arr[idx].guest =
			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;

		if (event->attr.exclude_host)
			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		else if (event->attr.exclude_guest)
			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	}

	*nr = x86_pmu.num_counters;
	return arr;
}

static void core_pmu_enable_event(struct perf_event *event)
{
	if (!event->attr.exclude_host)
		x86_pmu_enable_event(event);
}

static void core_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask) ||
		    cpuc->events[idx]->attr.exclude_host)
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}

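/*
 * Describe the raw config field layout to user space via sysfs
 * (the format/ directory of the PMU's event_source device).
 */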
PMU_FORMAT_ATTR(event,	"config:0-7"	);
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(pc,	"config:19"	);
PMU_FORMAT_ATTR(any,	"config:21"	); /* v3 + */
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);

static struct attribute *intel_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};

ssize_t intel_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);

	return x86_event_sysfs_show(page, config, event);
}

static __initconst const struct x86_pmu core_pmu = {
	.name			= "core",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= core_pmu_enable_all,
	.enable			= core_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
	.guest_get_msrs		= core_guest_get_msrs,
	.format_attrs		= intel_arch_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,
};

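/*
 * Allocate the per-core structure backing the shared extra MSRs;
 * intel_pmu_cpu_starting() later links HT siblings to a single copy.
 */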
struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	struct intel_shared_regs *regs;
	int i;

	regs = kzalloc_node(sizeof(struct intel_shared_regs),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (regs) {
		/*
		 * initialize the locks to keep lockdep happy
		 */
		for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);

		regs->core_id = -1;
	}
	return regs;
}

static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
		return NOTIFY_OK;

	cpuc->shared_regs = allocate_shared_regs(cpu);
	if (!cpuc->shared_regs)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

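/*
 * Runs on the hotplugged CPU itself: reset the LBRs and, unless HT
 * sharing is disabled, adopt the shared_regs a sibling thread on the
 * same core has already published.
 */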
74846d35
PZ
1705static void intel_pmu_cpu_starting(int cpu)
1706{
a7e3ed1e
AK
1707 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1708 int core_id = topology_core_id(cpu);
1709 int i;
1710
69092624
LM
1711 init_debug_store_on_cpu(cpu);
1712 /*
1713 * Deal with CPUs that don't clear their LBRs on power-up.
1714 */
1715 intel_pmu_lbr_reset();
1716
b36817e8
SE
1717 cpuc->lbr_sel = NULL;
1718
1719 if (!cpuc->shared_regs)
69092624
LM
1720 return;
1721
b36817e8
SE
1722 if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
1723 for_each_cpu(i, topology_thread_cpumask(cpu)) {
1724 struct intel_shared_regs *pc;
a7e3ed1e 1725
b36817e8
SE
1726 pc = per_cpu(cpu_hw_events, i).shared_regs;
1727 if (pc && pc->core_id == core_id) {
1728 cpuc->kfree_on_online = cpuc->shared_regs;
1729 cpuc->shared_regs = pc;
1730 break;
1731 }
a7e3ed1e 1732 }
b36817e8
SE
1733 cpuc->shared_regs->core_id = core_id;
1734 cpuc->shared_regs->refcnt++;
a7e3ed1e
AK
1735 }
1736
b36817e8
SE
1737 if (x86_pmu.lbr_sel_map)
1738 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
74846d35
PZ
1739}
1740
1741static void intel_pmu_cpu_dying(int cpu)
1742{
a7e3ed1e 1743 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
efc9f05d 1744 struct intel_shared_regs *pc;
a7e3ed1e 1745
efc9f05d 1746 pc = cpuc->shared_regs;
a7e3ed1e
AK
1747 if (pc) {
1748 if (pc->core_id == -1 || --pc->refcnt == 0)
1749 kfree(pc);
efc9f05d 1750 cpuc->shared_regs = NULL;
a7e3ed1e
AK
1751 }
1752
74846d35
PZ
1753 fini_debug_store_on_cpu(cpu);
1754}
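/*
 * Editor's note: teardown mirrors the sharing above -- the last
 * sibling to go down drops refcnt to 0 and frees the structure, while
 * core_id == -1 identifies a never-shared orphan that is freed
 * immediately.
 */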
1755
d010b332
SE
1756static void intel_pmu_flush_branch_stack(void)
1757{
 1758 /*
 1759 * Intel LBR does not tag entries with the
 1760 * PID of the current task, so we need to
 1761 * flush it on a context switch.
 1762 * For now, we simply reset it.
 1763 */
1764 if (x86_pmu.lbr_nr)
1765 intel_pmu_lbr_reset();
1766}
1767
641cc938
JO
1768PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
1769
1770static struct attribute *intel_arch3_formats_attr[] = {
1771 &format_attr_event.attr,
1772 &format_attr_umask.attr,
1773 &format_attr_edge.attr,
1774 &format_attr_pc.attr,
1775 &format_attr_any.attr,
1776 &format_attr_inv.attr,
1777 &format_attr_cmask.attr,
1778
1779 &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
1780 NULL,
1781};
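/*
 * Editor's note: PMU_FORMAT_ATTR() entries such as offcore_rsp
 * ("config1:0-63") are exported under
 * /sys/bus/event_source/devices/cpu/format/, letting tooling name the
 * field symbolically, e.g. (values illustrative only):
 *
 *   perf stat -e 'cpu/event=0xb7,umask=0x01,offcore_rsp=0xffff/' ...
 */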
1782
caaa8be3 1783static __initconst const struct x86_pmu intel_pmu = {
f22f54f4
PZ
1784 .name = "Intel",
1785 .handle_irq = intel_pmu_handle_irq,
1786 .disable_all = intel_pmu_disable_all,
1787 .enable_all = intel_pmu_enable_all,
1788 .enable = intel_pmu_enable_event,
1789 .disable = intel_pmu_disable_event,
b4cdc5c2 1790 .hw_config = intel_pmu_hw_config,
a072738e 1791 .schedule_events = x86_schedule_events,
f22f54f4
PZ
1792 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
1793 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
1794 .event_map = intel_pmu_event_map,
f22f54f4
PZ
1795 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
1796 .apic = 1,
1797 /*
 1798 * Intel PMCs cannot be accessed sanely above 32-bit width,
1799 * so we install an artificial 1<<31 period regardless of
1800 * the generic event period:
1801 */
1802 .max_period = (1ULL << 31) - 1,
3f6da390 1803 .get_event_constraints = intel_get_event_constraints,
a7e3ed1e 1804 .put_event_constraints = intel_put_event_constraints,
0780c927 1805 .pebs_aliases = intel_pebs_aliases_core2,
3f6da390 1806
641cc938 1807 .format_attrs = intel_arch3_formats_attr,
0bf79d44 1808 .events_sysfs_show = intel_event_sysfs_show,
641cc938 1809
a7e3ed1e 1810 .cpu_prepare = intel_pmu_cpu_prepare,
74846d35
PZ
1811 .cpu_starting = intel_pmu_cpu_starting,
1812 .cpu_dying = intel_pmu_cpu_dying,
144d31e6 1813 .guest_get_msrs = intel_guest_get_msrs,
d010b332 1814 .flush_branch_stack = intel_pmu_flush_branch_stack,
f22f54f4
PZ
1815};
1816
c1d6f42f 1817static __init void intel_clovertown_quirk(void)
3c44780b
PZ
1818{
1819 /*
1820 * PEBS is unreliable due to:
1821 *
1822 * AJ67 - PEBS may experience CPL leaks
1823 * AJ68 - PEBS PMI may be delayed by one event
1824 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
1825 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
1826 *
1827 * AJ67 could be worked around by restricting the OS/USR flags.
1828 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
1829 *
1830 * AJ106 could possibly be worked around by not allowing LBR
1831 * usage from PEBS, including the fixup.
1832 * AJ68 could possibly be worked around by always programming
ec75a716 1833 * a pebs_event_reset[0] value and coping with the lost events.
3c44780b
PZ
1834 *
1835 * But taken together it might just make sense to not enable PEBS on
1836 * these chips.
1837 */
c767a54b 1838 pr_warn("PEBS disabled due to CPU errata\n");
3c44780b
PZ
1839 x86_pmu.pebs = 0;
1840 x86_pmu.pebs_constraints = NULL;
1841}
1842
c93dc84c
PZ
1843static int intel_snb_pebs_broken(int cpu)
1844{
1845 u32 rev = UINT_MAX; /* default to broken for unknown models */
1846
1847 switch (cpu_data(cpu).x86_model) {
1848 case 42: /* SNB */
1849 rev = 0x28;
1850 break;
1851
1852 case 45: /* SNB-EP */
1853 switch (cpu_data(cpu).x86_mask) {
1854 case 6: rev = 0x618; break;
1855 case 7: rev = 0x70c; break;
1856 }
1857 }
1858
1859 return (cpu_data(cpu).microcode < rev);
1860}
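/*
 * Editor's note: the revisions above are the minimum microcode levels
 * with working PEBS per this table -- 0x28 for SandyBridge desktop
 * (model 42) and 0x618/0x70c for the two SNB-EP steppings; anything
 * older, or an unrecognized model, is treated as broken (rev stays
 * UINT_MAX).
 */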
1861
1862static void intel_snb_check_microcode(void)
1863{
1864 int pebs_broken = 0;
1865 int cpu;
1866
1867 get_online_cpus();
1868 for_each_online_cpu(cpu) {
1869 if ((pebs_broken = intel_snb_pebs_broken(cpu)))
1870 break;
1871 }
1872 put_online_cpus();
1873
1874 if (pebs_broken == x86_pmu.pebs_broken)
1875 return;
1876
1877 /*
 1878 * Serialized by the microcode lock.
1879 */
1880 if (x86_pmu.pebs_broken) {
1881 pr_info("PEBS enabled due to microcode update\n");
1882 x86_pmu.pebs_broken = 0;
1883 } else {
1884 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
1885 x86_pmu.pebs_broken = 1;
1886 }
1887}
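/*
 * Editor's note: this runs once at init and again from the
 * x86_pmu.check_microcode hook (wired up in the quirk below), so a
 * runtime microcode update can flip pebs_broken in either direction.
 */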
1888
c1d6f42f 1889static __init void intel_sandybridge_quirk(void)
6a600a8b 1890{
c93dc84c
PZ
1891 x86_pmu.check_microcode = intel_snb_check_microcode;
1892 intel_snb_check_microcode();
6a600a8b
PZ
1893}
1894
c1d6f42f
PZ
1895static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
1896 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
1897 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
1898 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
1899 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
1900 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
1901 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
1902 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
ffb871bc
GN
1903};
1904
c1d6f42f
PZ
1905static __init void intel_arch_events_quirk(void)
1906{
1907 int bit;
1908
 1909 /* disable events that CPUID reports as not present */
1910 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
1911 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
c767a54b
JP
 1912 pr_warn("CPUID marked event: '%s' unavailable\n",
1913 intel_arch_events_map[bit].name);
c1d6f42f
PZ
1914 }
1915}
1916
1917static __init void intel_nehalem_quirk(void)
1918{
1919 union cpuid10_ebx ebx;
1920
1921 ebx.full = x86_pmu.events_maskl;
1922 if (ebx.split.no_branch_misses_retired) {
1923 /*
1924 * Erratum AAJ80 detected, we work it around by using
1925 * the BR_MISP_EXEC.ANY event. This will over-count
1926 * branch-misses, but it's still much better than the
1927 * architectural event which is often completely bogus:
1928 */
1929 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
1930 ebx.split.no_branch_misses_retired = 0;
1931 x86_pmu.events_maskl = ebx.full;
c767a54b 1932 pr_info("CPU erratum AAJ80 worked around\n");
c1d6f42f
PZ
1933 }
1934}
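/*
 * Editor's note: 0x7f89 decodes as event 0x89 with umask 0x7f, i.e.
 * BR_MISP_EXEC.ANY (umask in bits 8-15, event select in bits 0-7).
 */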
1935
de0428a7 1936__init int intel_pmu_init(void)
f22f54f4
PZ
1937{
1938 union cpuid10_edx edx;
1939 union cpuid10_eax eax;
ffb871bc 1940 union cpuid10_ebx ebx;
a1eac7ac 1941 struct event_constraint *c;
f22f54f4 1942 unsigned int unused;
f22f54f4
PZ
1943 int version;
1944
1945 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
a072738e
CG
1946 switch (boot_cpu_data.x86) {
1947 case 0x6:
1948 return p6_pmu_init();
e717bf4e
VW
1949 case 0xb:
1950 return knc_pmu_init();
a072738e
CG
1951 case 0xf:
1952 return p4_pmu_init();
1953 }
f22f54f4 1954 return -ENODEV;
f22f54f4
PZ
1955 }
1956
1957 /*
1958 * Check whether the Architectural PerfMon supports
1959 * Branch Misses Retired hw_event or not.
1960 */
ffb871bc
GN
1961 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
1962 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
f22f54f4
PZ
1963 return -ENODEV;
1964
1965 version = eax.split.version_id;
1966 if (version < 2)
1967 x86_pmu = core_pmu;
1968 else
1969 x86_pmu = intel_pmu;
1970
1971 x86_pmu.version = version;
948b1bb8
RR
1972 x86_pmu.num_counters = eax.split.num_counters;
1973 x86_pmu.cntval_bits = eax.split.bit_width;
1974 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
f22f54f4 1975
c1d6f42f
PZ
1976 x86_pmu.events_maskl = ebx.full;
1977 x86_pmu.events_mask_len = eax.split.mask_length;
1978
70ab7003
AK
1979 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
1980
f22f54f4
PZ
1981 /*
1982 * Quirk: v2 perfmon does not report fixed-purpose events, so
1983 * assume at least 3 events:
1984 */
1985 if (version > 1)
948b1bb8 1986 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
f22f54f4 1987
8db909a7
PZ
1988 /*
1989 * v2 and above have a perf capabilities MSR
1990 */
1991 if (version > 1) {
1992 u64 capabilities;
1993
1994 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
1995 x86_pmu.intel_cap.capabilities = capabilities;
1996 }
1997
ca037701
PZ
1998 intel_ds_init();
1999
c1d6f42f
PZ
2000 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
2001
f22f54f4
PZ
2002 /*
2003 * Install the hw-cache-events table:
2004 */
2005 switch (boot_cpu_data.x86_model) {
2006 case 14: /* 65 nm core solo/duo, "Yonah" */
2007 pr_cont("Core events, ");
2008 break;
2009
2010 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
c1d6f42f 2011 x86_add_quirk(intel_clovertown_quirk);
f22f54f4
PZ
2012 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2013 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2014 case 29: /* six-core 45 nm xeon "Dunnington" */
2015 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2016 sizeof(hw_cache_event_ids));
2017
caff2bef
PZ
2018 intel_pmu_lbr_init_core();
2019
f22f54f4 2020 x86_pmu.event_constraints = intel_core2_event_constraints;
17e31629 2021 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
f22f54f4
PZ
2022 pr_cont("Core2 events, ");
2023 break;
2024
2025 case 26: /* 45 nm nehalem, "Bloomfield" */
2026 case 30: /* 45 nm nehalem, "Lynnfield" */
134fbadf 2027 case 46: /* 45 nm nehalem-ex, "Beckton" */
f22f54f4
PZ
2028 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2029 sizeof(hw_cache_event_ids));
e994d7d2
AK
2030 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
2031 sizeof(hw_cache_extra_regs));
f22f54f4 2032
caff2bef
PZ
2033 intel_pmu_lbr_init_nhm();
2034
f22f54f4 2035 x86_pmu.event_constraints = intel_nehalem_event_constraints;
17e31629 2036 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
11164cd4 2037 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
a7e3ed1e 2038 x86_pmu.extra_regs = intel_nehalem_extra_regs;
ec75a716 2039
91fc4cc0 2040 /* UOPS_ISSUED.STALLED_CYCLES */
f9b4eeb8
PZ
2041 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2042 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
91fc4cc0 2043 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
f9b4eeb8
PZ
2044 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2045 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
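 /*
 * Editor's note: X86_CONFIG() packs these fields into the raw event
 * encoding (event in bits 0-7, umask in 8-15, inv in bit 23, cmask in
 * 24-31), so the two mappings above evaluate to 0x0180010e and
 * 0x01803fb1 respectively.
 */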
94403f88 2046
c1d6f42f 2047 x86_add_quirk(intel_nehalem_quirk);
ec75a716 2048
11164cd4 2049 pr_cont("Nehalem events, ");
f22f54f4 2050 break;
caff2bef 2051
b622d644 2052 case 28: /* Atom */
0927b482
SL
2053 case 38: /* Lincroft */
2054 case 39: /* Penwell */
2055 case 53: /* Cloverview */
2056 case 54: /* Cedarview */
f22f54f4
PZ
2057 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2058 sizeof(hw_cache_event_ids));
2059
caff2bef
PZ
2060 intel_pmu_lbr_init_atom();
2061
f22f54f4 2062 x86_pmu.event_constraints = intel_gen_event_constraints;
17e31629 2063 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
f22f54f4
PZ
2064 pr_cont("Atom events, ");
2065 break;
2066
2067 case 37: /* 32 nm nehalem, "Clarkdale" */
2068 case 44: /* 32 nm nehalem, "Gulftown" */
b2508e82 2069 case 47: /* 32 nm Xeon E7 */
f22f54f4
PZ
2070 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
2071 sizeof(hw_cache_event_ids));
e994d7d2
AK
2072 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
2073 sizeof(hw_cache_extra_regs));
f22f54f4 2074
caff2bef
PZ
2075 intel_pmu_lbr_init_nhm();
2076
f22f54f4 2077 x86_pmu.event_constraints = intel_westmere_event_constraints;
40b91cd1 2078 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
17e31629 2079 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
a7e3ed1e 2080 x86_pmu.extra_regs = intel_westmere_extra_regs;
b79e8941 2081 x86_pmu.er_flags |= ERF_HAS_RSP_1;
30112039
IM
2082
2083 /* UOPS_ISSUED.STALLED_CYCLES */
f9b4eeb8
PZ
2084 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2085 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
30112039 2086 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
f9b4eeb8
PZ
2087 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2088 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
30112039 2089
f22f54f4
PZ
2090 pr_cont("Westmere events, ");
2091 break;
b622d644 2092
b06b3d49 2093 case 42: /* SandyBridge */
a34668f6 2094 case 45: /* SandyBridge, "Romley-EP" */
47a8863d 2095 x86_add_quirk(intel_sandybridge_quirk);
b06b3d49
LM
2096 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2097 sizeof(hw_cache_event_ids));
74e6543f
YZ
2098 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2099 sizeof(hw_cache_extra_regs));
b06b3d49 2100
c5cc2cd9 2101 intel_pmu_lbr_init_snb();
b06b3d49
LM
2102
2103 x86_pmu.event_constraints = intel_snb_event_constraints;
de0428a7 2104 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
0780c927 2105 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
f1923820
SE
2106 if (boot_cpu_data.x86_model == 45)
2107 x86_pmu.extra_regs = intel_snbep_extra_regs;
2108 else
2109 x86_pmu.extra_regs = intel_snb_extra_regs;
ee89cbc2 2110 /* all extra regs are per-cpu when HT is on */
b79e8941
PZ
2111 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2112 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
e04d1b23
LM
2113
2114 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
f9b4eeb8
PZ
2115 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2116 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
e04d1b23 2117 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
f9b4eeb8
PZ
2118 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2119 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
e04d1b23 2120
b06b3d49
LM
2121 pr_cont("SandyBridge events, ");
2122 break;
20a36e39 2123 case 58: /* IvyBridge */
923d8697 2124 case 62: /* IvyBridge EP */
20a36e39
SE
2125 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2126 sizeof(hw_cache_event_ids));
2127 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2128 sizeof(hw_cache_extra_regs));
2129
2130 intel_pmu_lbr_init_snb();
2131
69943182 2132 x86_pmu.event_constraints = intel_ivb_event_constraints;
20a36e39
SE
2133 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
2134 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
f1923820
SE
2135 if (boot_cpu_data.x86_model == 62)
2136 x86_pmu.extra_regs = intel_snbep_extra_regs;
2137 else
2138 x86_pmu.extra_regs = intel_snb_extra_regs;
20a36e39
SE
2139 /* all extra regs are per-cpu when HT is on */
2140 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2141 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2142
2143 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2144 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2145 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2146
2147 pr_cont("IvyBridge events, ");
2148 break;
2149
b06b3d49 2150
f22f54f4 2151 default:
0af3ac1f
AK
2152 switch (x86_pmu.version) {
2153 case 1:
2154 x86_pmu.event_constraints = intel_v1_event_constraints;
2155 pr_cont("generic architected perfmon v1, ");
2156 break;
2157 default:
2158 /*
2159 * default constraints for v2 and up
2160 */
2161 x86_pmu.event_constraints = intel_gen_event_constraints;
2162 pr_cont("generic architected perfmon, ");
2163 break;
2164 }
f22f54f4 2165 }
ffb871bc 2166
a1eac7ac
RR
2167 if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
2168 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2169 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
2170 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
2171 }
2172 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
2173
2174 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
2175 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2176 x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
2177 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
2178 }
2179
2180 x86_pmu.intel_ctrl |=
2181 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
2182
2183 if (x86_pmu.event_constraints) {
2184 /*
2185 * event on fixed counter2 (REF_CYCLES) only works on this
2186 * counter, so do not extend mask to generic counters
2187 */
2188 for_each_event_constraint(c, x86_pmu.event_constraints) {
2189 if (c->cmask != X86_RAW_EVENT_MASK
2190 || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
2191 continue;
2192 }
2193
2194 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
2195 c->weight += x86_pmu.num_counters;
2196 }
2197 }
2198
f22f54f4
PZ
2199 return 0;
2200}
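/*
 * Editor's sketch (not part of the kernel source): a standalone
 * userspace illustration of how intel_ctrl is composed above,
 * assuming INTEL_PMC_IDX_FIXED == 32 as in the kernel headers and a
 * CPU with 4 generic and 3 fixed counters.
 */
#include <stdio.h>

int main(void)
{
	int num_counters = 4, num_counters_fixed = 3;
	unsigned long long intel_ctrl;

	intel_ctrl  = (1ULL << num_counters) - 1;                /* generic PMCs: bits 0-3 */
	intel_ctrl |= ((1ULL << num_counters_fixed) - 1) << 32;  /* fixed PMCs: bits 32-34 */

	printf("intel_ctrl = %#llx\n", intel_ctrl);              /* prints 0x70000000f */
	return 0;
}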