/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/hardirq.h>
#include <asm/apic.h>

#include "perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
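/*
 * Note: each entry above is a raw PERFEVTSEL encoding (unit mask in bits
 * 15:8, event select in bits 7:0).  For example, 0x412e is umask 0x41 on
 * event 0x2e (LLC misses) and 0x4f2e is umask 0x4f on the same event
 * (LLC references).  REF_CPU_CYCLES (0x0300) has no real event select;
 * it is a pseudo-encoding that the fixed-counter constraints below map
 * onto fixed counter 2.
 */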

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
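/*
 * The second argument of INTEL_EVENT_CONSTRAINT() (see perf_event.h) is a
 * bitmask of the general-purpose counters the event may be scheduled on:
 * 0x1 means counter 0 only, 0x2 means counter 1 only, 0x3 means either of
 * the first two.  FIXED_EVENT_CONSTRAINT() instead ties a (pseudo-)encoding
 * to one of the fixed-function counters.
 */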

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	EVENT_EXTRA_END
};
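/*
 * The extra_reg tables above describe events that need a second MSR
 * programmed in addition to the event-select register: event 0xb7
 * (OFFCORE_RESPONSE_0) takes its request/response bits from
 * MSR_OFFCORE_RSP_0, and event 0xbb (OFFCORE_RESPONSE_1) from
 * MSR_OFFCORE_RSP_1.  The third argument is the mask of valid bits in
 * that MSR; Sandy Bridge widens it from 16 bits to 0x3fffffffff.
 */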

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

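/*
 * For example, intel_pmu_event_map(PERF_COUNT_HW_CPU_CYCLES) returns the
 * raw encoding 0x003c from the table above, which the generic x86 perf
 * code then uses as the base PERFEVTSEL programming for the "cycles"
 * event.
 */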
b06b3d49
LM
139static __initconst const u64 snb_hw_cache_event_ids
140 [PERF_COUNT_HW_CACHE_MAX]
141 [PERF_COUNT_HW_CACHE_OP_MAX]
142 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
143{
144 [ C(L1D) ] = {
145 [ C(OP_READ) ] = {
146 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
147 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
148 },
149 [ C(OP_WRITE) ] = {
150 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
151 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
152 },
153 [ C(OP_PREFETCH) ] = {
154 [ C(RESULT_ACCESS) ] = 0x0,
155 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
156 },
157 },
158 [ C(L1I ) ] = {
159 [ C(OP_READ) ] = {
160 [ C(RESULT_ACCESS) ] = 0x0,
161 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
162 },
163 [ C(OP_WRITE) ] = {
164 [ C(RESULT_ACCESS) ] = -1,
165 [ C(RESULT_MISS) ] = -1,
166 },
167 [ C(OP_PREFETCH) ] = {
168 [ C(RESULT_ACCESS) ] = 0x0,
169 [ C(RESULT_MISS) ] = 0x0,
170 },
171 },
172 [ C(LL ) ] = {
b06b3d49 173 [ C(OP_READ) ] = {
63b6a675 174 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
b06b3d49 175 [ C(RESULT_ACCESS) ] = 0x01b7,
63b6a675
PZ
176 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
177 [ C(RESULT_MISS) ] = 0x01b7,
b06b3d49
LM
178 },
179 [ C(OP_WRITE) ] = {
63b6a675 180 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
b06b3d49 181 [ C(RESULT_ACCESS) ] = 0x01b7,
63b6a675
PZ
182 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
183 [ C(RESULT_MISS) ] = 0x01b7,
b06b3d49
LM
184 },
185 [ C(OP_PREFETCH) ] = {
63b6a675 186 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
b06b3d49 187 [ C(RESULT_ACCESS) ] = 0x01b7,
63b6a675
PZ
188 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
189 [ C(RESULT_MISS) ] = 0x01b7,
b06b3d49
LM
190 },
191 },
192 [ C(DTLB) ] = {
193 [ C(OP_READ) ] = {
194 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
195 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
196 },
197 [ C(OP_WRITE) ] = {
198 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
199 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
200 },
201 [ C(OP_PREFETCH) ] = {
202 [ C(RESULT_ACCESS) ] = 0x0,
203 [ C(RESULT_MISS) ] = 0x0,
204 },
205 },
206 [ C(ITLB) ] = {
207 [ C(OP_READ) ] = {
208 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
209 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
210 },
211 [ C(OP_WRITE) ] = {
212 [ C(RESULT_ACCESS) ] = -1,
213 [ C(RESULT_MISS) ] = -1,
214 },
215 [ C(OP_PREFETCH) ] = {
216 [ C(RESULT_ACCESS) ] = -1,
217 [ C(RESULT_MISS) ] = -1,
218 },
219 },
220 [ C(BPU ) ] = {
221 [ C(OP_READ) ] = {
222 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
223 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
224 },
225 [ C(OP_WRITE) ] = {
226 [ C(RESULT_ACCESS) ] = -1,
227 [ C(RESULT_MISS) ] = -1,
228 },
229 [ C(OP_PREFETCH) ] = {
230 [ C(RESULT_ACCESS) ] = -1,
231 [ C(RESULT_MISS) ] = -1,
232 },
233 },
89d6c0b5
PZ
234 [ C(NODE) ] = {
235 [ C(OP_READ) ] = {
236 [ C(RESULT_ACCESS) ] = -1,
237 [ C(RESULT_MISS) ] = -1,
238 },
239 [ C(OP_WRITE) ] = {
240 [ C(RESULT_ACCESS) ] = -1,
241 [ C(RESULT_MISS) ] = -1,
242 },
243 [ C(OP_PREFETCH) ] = {
244 [ C(RESULT_ACCESS) ] = -1,
245 [ C(RESULT_MISS) ] = -1,
246 },
247 },
248
b06b3d49
LM
249};
250
caaa8be3 251static __initconst const u64 westmere_hw_cache_event_ids
f22f54f4
PZ
252 [PERF_COUNT_HW_CACHE_MAX]
253 [PERF_COUNT_HW_CACHE_OP_MAX]
254 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
255{
256 [ C(L1D) ] = {
257 [ C(OP_READ) ] = {
258 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
259 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
260 },
261 [ C(OP_WRITE) ] = {
 262 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
263 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
264 },
265 [ C(OP_PREFETCH) ] = {
266 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
267 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
268 },
269 },
270 [ C(L1I ) ] = {
271 [ C(OP_READ) ] = {
272 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
273 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
274 },
275 [ C(OP_WRITE) ] = {
276 [ C(RESULT_ACCESS) ] = -1,
277 [ C(RESULT_MISS) ] = -1,
278 },
279 [ C(OP_PREFETCH) ] = {
280 [ C(RESULT_ACCESS) ] = 0x0,
281 [ C(RESULT_MISS) ] = 0x0,
282 },
283 },
284 [ C(LL ) ] = {
285 [ C(OP_READ) ] = {
63b6a675 286 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
e994d7d2 287 [ C(RESULT_ACCESS) ] = 0x01b7,
63b6a675
PZ
288 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
289 [ C(RESULT_MISS) ] = 0x01b7,
f22f54f4 290 },
e994d7d2
AK
291 /*
292 * Use RFO, not WRITEBACK, because a write miss would typically occur
293 * on RFO.
294 */
f22f54f4 295 [ C(OP_WRITE) ] = {
63b6a675
PZ
296 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
297 [ C(RESULT_ACCESS) ] = 0x01b7,
298 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
e994d7d2 299 [ C(RESULT_MISS) ] = 0x01b7,
f22f54f4
PZ
300 },
301 [ C(OP_PREFETCH) ] = {
63b6a675 302 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
e994d7d2 303 [ C(RESULT_ACCESS) ] = 0x01b7,
63b6a675
PZ
304 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
305 [ C(RESULT_MISS) ] = 0x01b7,
f22f54f4
PZ
306 },
307 },
308 [ C(DTLB) ] = {
309 [ C(OP_READ) ] = {
310 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
311 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
312 },
313 [ C(OP_WRITE) ] = {
 314 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
315 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
316 },
317 [ C(OP_PREFETCH) ] = {
318 [ C(RESULT_ACCESS) ] = 0x0,
319 [ C(RESULT_MISS) ] = 0x0,
320 },
321 },
322 [ C(ITLB) ] = {
323 [ C(OP_READ) ] = {
324 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
325 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
326 },
327 [ C(OP_WRITE) ] = {
328 [ C(RESULT_ACCESS) ] = -1,
329 [ C(RESULT_MISS) ] = -1,
330 },
331 [ C(OP_PREFETCH) ] = {
332 [ C(RESULT_ACCESS) ] = -1,
333 [ C(RESULT_MISS) ] = -1,
334 },
335 },
336 [ C(BPU ) ] = {
337 [ C(OP_READ) ] = {
338 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
339 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
340 },
341 [ C(OP_WRITE) ] = {
342 [ C(RESULT_ACCESS) ] = -1,
343 [ C(RESULT_MISS) ] = -1,
344 },
345 [ C(OP_PREFETCH) ] = {
346 [ C(RESULT_ACCESS) ] = -1,
347 [ C(RESULT_MISS) ] = -1,
348 },
349 },
89d6c0b5
PZ
350 [ C(NODE) ] = {
351 [ C(OP_READ) ] = {
352 [ C(RESULT_ACCESS) ] = 0x01b7,
353 [ C(RESULT_MISS) ] = 0x01b7,
354 },
355 [ C(OP_WRITE) ] = {
356 [ C(RESULT_ACCESS) ] = 0x01b7,
357 [ C(RESULT_MISS) ] = 0x01b7,
358 },
359 [ C(OP_PREFETCH) ] = {
360 [ C(RESULT_ACCESS) ] = 0x01b7,
361 [ C(RESULT_MISS) ] = 0x01b7,
362 },
363 },
f22f54f4
PZ
364};
365
e994d7d2 366/*
63b6a675
PZ
367 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
368 * See IA32 SDM Vol 3B 30.6.1.3
e994d7d2
AK
369 */
370
63b6a675
PZ
371#define NHM_DMND_DATA_RD (1 << 0)
372#define NHM_DMND_RFO (1 << 1)
373#define NHM_DMND_IFETCH (1 << 2)
374#define NHM_DMND_WB (1 << 3)
375#define NHM_PF_DATA_RD (1 << 4)
376#define NHM_PF_DATA_RFO (1 << 5)
377#define NHM_PF_IFETCH (1 << 6)
378#define NHM_OFFCORE_OTHER (1 << 7)
379#define NHM_UNCORE_HIT (1 << 8)
380#define NHM_OTHER_CORE_HIT_SNP (1 << 9)
381#define NHM_OTHER_CORE_HITM (1 << 10)
382 /* reserved */
383#define NHM_REMOTE_CACHE_FWD (1 << 12)
384#define NHM_REMOTE_DRAM (1 << 13)
385#define NHM_LOCAL_DRAM (1 << 14)
386#define NHM_NON_DRAM (1 << 15)
387
87e24f4b
PZ
388#define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
389#define NHM_REMOTE (NHM_REMOTE_DRAM)
63b6a675
PZ
390
391#define NHM_DMND_READ (NHM_DMND_DATA_RD)
392#define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
393#define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
394
395#define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
87e24f4b 396#define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
63b6a675 397#define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
e994d7d2
AK
398
399static __initconst const u64 nehalem_hw_cache_extra_regs
400 [PERF_COUNT_HW_CACHE_MAX]
401 [PERF_COUNT_HW_CACHE_OP_MAX]
402 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
403{
404 [ C(LL ) ] = {
405 [ C(OP_READ) ] = {
63b6a675
PZ
406 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
407 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
e994d7d2
AK
408 },
409 [ C(OP_WRITE) ] = {
63b6a675
PZ
410 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
411 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
e994d7d2
AK
412 },
413 [ C(OP_PREFETCH) ] = {
63b6a675
PZ
414 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
415 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
e994d7d2 416 },
89d6c0b5
PZ
417 },
418 [ C(NODE) ] = {
419 [ C(OP_READ) ] = {
87e24f4b
PZ
420 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
421 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
89d6c0b5
PZ
422 },
423 [ C(OP_WRITE) ] = {
87e24f4b
PZ
424 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
425 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
89d6c0b5
PZ
426 },
427 [ C(OP_PREFETCH) ] = {
87e24f4b
PZ
428 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
429 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
89d6c0b5
PZ
430 },
431 },
e994d7d2
AK
432};
433
caaa8be3 434static __initconst const u64 nehalem_hw_cache_event_ids
f22f54f4
PZ
435 [PERF_COUNT_HW_CACHE_MAX]
436 [PERF_COUNT_HW_CACHE_OP_MAX]
437 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
438{
439 [ C(L1D) ] = {
440 [ C(OP_READ) ] = {
f4929bd3
PZ
441 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
442 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
f22f54f4
PZ
443 },
444 [ C(OP_WRITE) ] = {
f4929bd3
PZ
 445 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
446 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
f22f54f4
PZ
447 },
448 [ C(OP_PREFETCH) ] = {
449 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
450 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
451 },
452 },
453 [ C(L1I ) ] = {
454 [ C(OP_READ) ] = {
455 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
456 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
457 },
458 [ C(OP_WRITE) ] = {
459 [ C(RESULT_ACCESS) ] = -1,
460 [ C(RESULT_MISS) ] = -1,
461 },
462 [ C(OP_PREFETCH) ] = {
463 [ C(RESULT_ACCESS) ] = 0x0,
464 [ C(RESULT_MISS) ] = 0x0,
465 },
466 },
467 [ C(LL ) ] = {
468 [ C(OP_READ) ] = {
e994d7d2
AK
469 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
470 [ C(RESULT_ACCESS) ] = 0x01b7,
471 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
472 [ C(RESULT_MISS) ] = 0x01b7,
f22f54f4 473 },
e994d7d2
AK
474 /*
475 * Use RFO, not WRITEBACK, because a write miss would typically occur
476 * on RFO.
477 */
f22f54f4 478 [ C(OP_WRITE) ] = {
e994d7d2
AK
479 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
480 [ C(RESULT_ACCESS) ] = 0x01b7,
481 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
482 [ C(RESULT_MISS) ] = 0x01b7,
f22f54f4
PZ
483 },
484 [ C(OP_PREFETCH) ] = {
e994d7d2
AK
485 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
486 [ C(RESULT_ACCESS) ] = 0x01b7,
487 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
488 [ C(RESULT_MISS) ] = 0x01b7,
f22f54f4
PZ
489 },
490 },
491 [ C(DTLB) ] = {
492 [ C(OP_READ) ] = {
493 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
494 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
495 },
496 [ C(OP_WRITE) ] = {
497 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
498 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
499 },
500 [ C(OP_PREFETCH) ] = {
501 [ C(RESULT_ACCESS) ] = 0x0,
502 [ C(RESULT_MISS) ] = 0x0,
503 },
504 },
505 [ C(ITLB) ] = {
506 [ C(OP_READ) ] = {
507 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
508 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
509 },
510 [ C(OP_WRITE) ] = {
511 [ C(RESULT_ACCESS) ] = -1,
512 [ C(RESULT_MISS) ] = -1,
513 },
514 [ C(OP_PREFETCH) ] = {
515 [ C(RESULT_ACCESS) ] = -1,
516 [ C(RESULT_MISS) ] = -1,
517 },
518 },
519 [ C(BPU ) ] = {
520 [ C(OP_READ) ] = {
521 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
522 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
523 },
524 [ C(OP_WRITE) ] = {
525 [ C(RESULT_ACCESS) ] = -1,
526 [ C(RESULT_MISS) ] = -1,
527 },
528 [ C(OP_PREFETCH) ] = {
529 [ C(RESULT_ACCESS) ] = -1,
530 [ C(RESULT_MISS) ] = -1,
531 },
532 },
89d6c0b5
PZ
533 [ C(NODE) ] = {
534 [ C(OP_READ) ] = {
535 [ C(RESULT_ACCESS) ] = 0x01b7,
536 [ C(RESULT_MISS) ] = 0x01b7,
537 },
538 [ C(OP_WRITE) ] = {
539 [ C(RESULT_ACCESS) ] = 0x01b7,
540 [ C(RESULT_MISS) ] = 0x01b7,
541 },
542 [ C(OP_PREFETCH) ] = {
543 [ C(RESULT_ACCESS) ] = 0x01b7,
544 [ C(RESULT_MISS) ] = 0x01b7,
545 },
546 },
f22f54f4
PZ
547};
548
caaa8be3 549static __initconst const u64 core2_hw_cache_event_ids
f22f54f4
PZ
550 [PERF_COUNT_HW_CACHE_MAX]
551 [PERF_COUNT_HW_CACHE_OP_MAX]
552 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
553{
554 [ C(L1D) ] = {
555 [ C(OP_READ) ] = {
556 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
557 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
558 },
559 [ C(OP_WRITE) ] = {
560 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
561 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
562 },
563 [ C(OP_PREFETCH) ] = {
564 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
565 [ C(RESULT_MISS) ] = 0,
566 },
567 },
568 [ C(L1I ) ] = {
569 [ C(OP_READ) ] = {
570 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
571 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
572 },
573 [ C(OP_WRITE) ] = {
574 [ C(RESULT_ACCESS) ] = -1,
575 [ C(RESULT_MISS) ] = -1,
576 },
577 [ C(OP_PREFETCH) ] = {
578 [ C(RESULT_ACCESS) ] = 0,
579 [ C(RESULT_MISS) ] = 0,
580 },
581 },
582 [ C(LL ) ] = {
583 [ C(OP_READ) ] = {
584 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
585 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
586 },
587 [ C(OP_WRITE) ] = {
588 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
589 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
590 },
591 [ C(OP_PREFETCH) ] = {
592 [ C(RESULT_ACCESS) ] = 0,
593 [ C(RESULT_MISS) ] = 0,
594 },
595 },
596 [ C(DTLB) ] = {
597 [ C(OP_READ) ] = {
598 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
599 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
600 },
601 [ C(OP_WRITE) ] = {
602 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
603 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
604 },
605 [ C(OP_PREFETCH) ] = {
606 [ C(RESULT_ACCESS) ] = 0,
607 [ C(RESULT_MISS) ] = 0,
608 },
609 },
610 [ C(ITLB) ] = {
611 [ C(OP_READ) ] = {
612 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
613 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
614 },
615 [ C(OP_WRITE) ] = {
616 [ C(RESULT_ACCESS) ] = -1,
617 [ C(RESULT_MISS) ] = -1,
618 },
619 [ C(OP_PREFETCH) ] = {
620 [ C(RESULT_ACCESS) ] = -1,
621 [ C(RESULT_MISS) ] = -1,
622 },
623 },
624 [ C(BPU ) ] = {
625 [ C(OP_READ) ] = {
626 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
627 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
628 },
629 [ C(OP_WRITE) ] = {
630 [ C(RESULT_ACCESS) ] = -1,
631 [ C(RESULT_MISS) ] = -1,
632 },
633 [ C(OP_PREFETCH) ] = {
634 [ C(RESULT_ACCESS) ] = -1,
635 [ C(RESULT_MISS) ] = -1,
636 },
637 },
638};
639
caaa8be3 640static __initconst const u64 atom_hw_cache_event_ids
f22f54f4
PZ
641 [PERF_COUNT_HW_CACHE_MAX]
642 [PERF_COUNT_HW_CACHE_OP_MAX]
643 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
644{
645 [ C(L1D) ] = {
646 [ C(OP_READ) ] = {
647 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
648 [ C(RESULT_MISS) ] = 0,
649 },
650 [ C(OP_WRITE) ] = {
651 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
652 [ C(RESULT_MISS) ] = 0,
653 },
654 [ C(OP_PREFETCH) ] = {
655 [ C(RESULT_ACCESS) ] = 0x0,
656 [ C(RESULT_MISS) ] = 0,
657 },
658 },
659 [ C(L1I ) ] = {
660 [ C(OP_READ) ] = {
661 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
662 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
663 },
664 [ C(OP_WRITE) ] = {
665 [ C(RESULT_ACCESS) ] = -1,
666 [ C(RESULT_MISS) ] = -1,
667 },
668 [ C(OP_PREFETCH) ] = {
669 [ C(RESULT_ACCESS) ] = 0,
670 [ C(RESULT_MISS) ] = 0,
671 },
672 },
673 [ C(LL ) ] = {
674 [ C(OP_READ) ] = {
675 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
676 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
677 },
678 [ C(OP_WRITE) ] = {
679 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
680 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
681 },
682 [ C(OP_PREFETCH) ] = {
683 [ C(RESULT_ACCESS) ] = 0,
684 [ C(RESULT_MISS) ] = 0,
685 },
686 },
687 [ C(DTLB) ] = {
688 [ C(OP_READ) ] = {
689 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
690 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
691 },
692 [ C(OP_WRITE) ] = {
693 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
694 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
695 },
696 [ C(OP_PREFETCH) ] = {
697 [ C(RESULT_ACCESS) ] = 0,
698 [ C(RESULT_MISS) ] = 0,
699 },
700 },
701 [ C(ITLB) ] = {
702 [ C(OP_READ) ] = {
703 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
704 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
705 },
706 [ C(OP_WRITE) ] = {
707 [ C(RESULT_ACCESS) ] = -1,
708 [ C(RESULT_MISS) ] = -1,
709 },
710 [ C(OP_PREFETCH) ] = {
711 [ C(RESULT_ACCESS) ] = -1,
712 [ C(RESULT_MISS) ] = -1,
713 },
714 },
715 [ C(BPU ) ] = {
716 [ C(OP_READ) ] = {
717 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
718 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
719 },
720 [ C(OP_WRITE) ] = {
721 [ C(RESULT_ACCESS) ] = -1,
722 [ C(RESULT_MISS) ] = -1,
723 },
724 [ C(OP_PREFETCH) ] = {
725 [ C(RESULT_ACCESS) ] = -1,
726 [ C(RESULT_MISS) ] = -1,
727 },
728 },
729};
730
static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))
		return true;

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		return true;

	return false;
}
743
f22f54f4
PZ
744static void intel_pmu_disable_all(void)
745{
746 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
747
748 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
749
15c7ad51 750 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
f22f54f4 751 intel_pmu_disable_bts();
ca037701
PZ
752
753 intel_pmu_pebs_disable_all();
caff2bef 754 intel_pmu_lbr_disable_all();
f22f54f4
PZ
755}
756
11164cd4 757static void intel_pmu_enable_all(int added)
f22f54f4
PZ
758{
759 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
760
d329527e
PZ
761 intel_pmu_pebs_enable_all();
762 intel_pmu_lbr_enable_all();
144d31e6
GN
763 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
764 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
f22f54f4 765
15c7ad51 766 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
f22f54f4 767 struct perf_event *event =
15c7ad51 768 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
f22f54f4
PZ
769
770 if (WARN_ON_ONCE(!event))
771 return;
772
773 intel_pmu_enable_bts(event->hw.config);
774 }
775}
776
/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
351af072 791static void intel_pmu_nhm_workaround(void)
11164cd4 792{
351af072
ZY
793 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
794 static const unsigned long nhm_magic[4] = {
795 0x4300B5,
796 0x4300D2,
797 0x4300B1,
798 0x4300B1
799 };
800 struct perf_event *event;
801 int i;
11164cd4 802
	/*
	 * The errata requires the following steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) Set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we take are a little different from the above:
	 * A) To reduce MSR operations, we don't run step 1) as the
	 *    registers are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with the magic number;
	 * C) With step 5), we only clear when the PERFEVTSELx is
	 *    not currently in use;
	 * D) Call x86_perf_event_set_period to restore PMCx.
	 */

	/* We always operate on 4 pairs of PERF counters */
825 for (i = 0; i < 4; i++) {
826 event = cpuc->events[i];
827 if (event)
828 x86_perf_event_update(event);
829 }
11164cd4 830
351af072
ZY
831 for (i = 0; i < 4; i++) {
832 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
833 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
834 }
835
836 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
837 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
11164cd4 838
351af072
ZY
839 for (i = 0; i < 4; i++) {
840 event = cpuc->events[i];
841
842 if (event) {
843 x86_perf_event_set_period(event);
31fa58af 844 __x86_pmu_enable_event(&event->hw,
351af072
ZY
845 ARCH_PERFMON_EVENTSEL_ENABLE);
846 } else
847 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
11164cd4 848 }
351af072
ZY
849}
850
851static void intel_pmu_nhm_enable_all(int added)
852{
853 if (added)
854 intel_pmu_nhm_workaround();
11164cd4
PZ
855 intel_pmu_enable_all(added);
856}
857
f22f54f4
PZ
858static inline u64 intel_pmu_get_status(void)
859{
860 u64 status;
861
862 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
863
864 return status;
865}
866
867static inline void intel_pmu_ack_status(u64 ack)
868{
869 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
870}
871
ca037701 872static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
f22f54f4 873{
15c7ad51 874 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
f22f54f4
PZ
875 u64 ctrl_val, mask;
876
877 mask = 0xfULL << (idx * 4);
878
879 rdmsrl(hwc->config_base, ctrl_val);
880 ctrl_val &= ~mask;
7645a24c 881 wrmsrl(hwc->config_base, ctrl_val);
f22f54f4
PZ
882}
883
ca037701 884static void intel_pmu_disable_event(struct perf_event *event)
f22f54f4 885{
aff3d91a 886 struct hw_perf_event *hwc = &event->hw;
144d31e6 887 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
aff3d91a 888
15c7ad51 889 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
f22f54f4
PZ
890 intel_pmu_disable_bts();
891 intel_pmu_drain_bts_buffer();
892 return;
893 }
894
144d31e6
GN
895 cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
896 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
897
	/*
	 * Must be disabled before any actual event, because any event
	 * may be combined with LBR.
	 */
902 if (intel_pmu_needs_lbr_smpl(event))
903 intel_pmu_lbr_disable(event);
904
f22f54f4 905 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
aff3d91a 906 intel_pmu_disable_fixed(hwc);
f22f54f4
PZ
907 return;
908 }
909
aff3d91a 910 x86_pmu_disable_event(event);
ca037701 911
ab608344 912 if (unlikely(event->attr.precise_ip))
ef21f683 913 intel_pmu_pebs_disable(event);
f22f54f4
PZ
914}
915
ca037701 916static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
f22f54f4 917{
15c7ad51 918 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
f22f54f4 919 u64 ctrl_val, bits, mask;
f22f54f4
PZ
920
921 /*
922 * Enable IRQ generation (0x8),
923 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
924 * if requested:
925 */
926 bits = 0x8ULL;
927 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
928 bits |= 0x2;
929 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
930 bits |= 0x1;
931
932 /*
933 * ANY bit is supported in v3 and up
934 */
935 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
936 bits |= 0x4;
937
938 bits <<= (idx * 4);
939 mask = 0xfULL << (idx * 4);
940
941 rdmsrl(hwc->config_base, ctrl_val);
942 ctrl_val &= ~mask;
943 ctrl_val |= bits;
7645a24c 944 wrmsrl(hwc->config_base, ctrl_val);
f22f54f4
PZ
945}
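
/*
 * For reference, the MSR_ARCH_PERFMON_FIXED_CTR_CTRL layout used above
 * gives each fixed counter a 4-bit control field: bit 0 enables ring-0
 * counting, bit 1 enables ring-3 counting, bit 2 is the ANY-thread bit
 * (version 3 and up), and bit 3 enables PMI on overflow.  For example,
 * enabling fixed counter 1 for user space with an interrupt writes the
 * value 0xa into bits 7:4 of that MSR, which is exactly what
 * intel_pmu_enable_fixed() computes.
 */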
946
aff3d91a 947static void intel_pmu_enable_event(struct perf_event *event)
f22f54f4 948{
aff3d91a 949 struct hw_perf_event *hwc = &event->hw;
144d31e6 950 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
aff3d91a 951
15c7ad51 952 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
0a3aee0d 953 if (!__this_cpu_read(cpu_hw_events.enabled))
f22f54f4
PZ
954 return;
955
956 intel_pmu_enable_bts(hwc->config);
957 return;
958 }
	/*
	 * Must be enabled before any actual event, because any event
	 * may be combined with LBR.
	 */
963 if (intel_pmu_needs_lbr_smpl(event))
964 intel_pmu_lbr_enable(event);
f22f54f4 965
144d31e6
GN
966 if (event->attr.exclude_host)
967 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
968 if (event->attr.exclude_guest)
969 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
970
f22f54f4 971 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
aff3d91a 972 intel_pmu_enable_fixed(hwc);
f22f54f4
PZ
973 return;
974 }
975
ab608344 976 if (unlikely(event->attr.precise_ip))
ef21f683 977 intel_pmu_pebs_enable(event);
ca037701 978
31fa58af 979 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
f22f54f4
PZ
980}
981
982/*
983 * Save and restart an expired event. Called by NMI contexts,
984 * so it has to be careful about preempting normal event ops:
985 */
de0428a7 986int intel_pmu_save_and_restart(struct perf_event *event)
f22f54f4 987{
cc2ad4ba
PZ
988 x86_perf_event_update(event);
989 return x86_perf_event_set_period(event);
f22f54f4
PZ
990}
991
992static void intel_pmu_reset(void)
993{
0a3aee0d 994 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
f22f54f4
PZ
995 unsigned long flags;
996 int idx;
997
948b1bb8 998 if (!x86_pmu.num_counters)
f22f54f4
PZ
999 return;
1000
1001 local_irq_save(flags);
1002
1003 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1004
948b1bb8 1005 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
715c85b1
PA
1006 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
1007 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
f22f54f4 1008 }
948b1bb8 1009 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
715c85b1 1010 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
948b1bb8 1011
f22f54f4
PZ
1012 if (ds)
1013 ds->bts_index = ds->bts_buffer_base;
1014
1015 local_irq_restore(flags);
1016}
1017
1018/*
1019 * This handler is triggered by the local APIC, so the APIC IRQ handling
1020 * rules apply:
1021 */
1022static int intel_pmu_handle_irq(struct pt_regs *regs)
1023{
1024 struct perf_sample_data data;
1025 struct cpu_hw_events *cpuc;
1026 int bit, loops;
2e556b5b 1027 u64 status;
b0b2072d 1028 int handled;
f22f54f4 1029
f22f54f4
PZ
1030 cpuc = &__get_cpu_var(cpu_hw_events);
1031
2bce5dac
DZ
1032 /*
1033 * Some chipsets need to unmask the LVTPC in a particular spot
1034 * inside the nmi handler. As a result, the unmasking was pushed
1035 * into all the nmi handlers.
1036 *
1037 * This handler doesn't seem to have any issues with the unmasking
1038 * so it was left at the top.
1039 */
1040 apic_write(APIC_LVTPC, APIC_DM_NMI);
1041
3fb2b8dd 1042 intel_pmu_disable_all();
b0b2072d 1043 handled = intel_pmu_drain_bts_buffer();
f22f54f4
PZ
1044 status = intel_pmu_get_status();
1045 if (!status) {
11164cd4 1046 intel_pmu_enable_all(0);
b0b2072d 1047 return handled;
f22f54f4
PZ
1048 }
1049
1050 loops = 0;
1051again:
2e556b5b 1052 intel_pmu_ack_status(status);
f22f54f4
PZ
1053 if (++loops > 100) {
1054 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1055 perf_event_print_debug();
1056 intel_pmu_reset();
3fb2b8dd 1057 goto done;
f22f54f4
PZ
1058 }
1059
1060 inc_irq_stat(apic_perf_irqs);
ca037701 1061
caff2bef
PZ
1062 intel_pmu_lbr_read();
1063
ca037701
PZ
1064 /*
1065 * PEBS overflow sets bit 62 in the global status register
1066 */
de725dec
PZ
1067 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
1068 handled++;
ca037701 1069 x86_pmu.drain_pebs(regs);
de725dec 1070 }
ca037701 1071
984b3f57 1072 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
f22f54f4
PZ
1073 struct perf_event *event = cpuc->events[bit];
1074
de725dec
PZ
1075 handled++;
1076
f22f54f4
PZ
1077 if (!test_bit(bit, cpuc->active_mask))
1078 continue;
1079
1080 if (!intel_pmu_save_and_restart(event))
1081 continue;
1082
fd0d000b 1083 perf_sample_data_init(&data, 0, event->hw.last_period);
f22f54f4 1084
60ce0fbd
SE
1085 if (has_branch_stack(event))
1086 data.br_stack = &cpuc->lbr_stack;
1087
a8b0ca17 1088 if (perf_event_overflow(event, &data, regs))
a4eaf7f1 1089 x86_pmu_stop(event, 0);
f22f54f4
PZ
1090 }
1091
f22f54f4
PZ
1092 /*
1093 * Repeat if there is more work to be done:
1094 */
1095 status = intel_pmu_get_status();
1096 if (status)
1097 goto again;
1098
3fb2b8dd 1099done:
11164cd4 1100 intel_pmu_enable_all(0);
de725dec 1101 return handled;
f22f54f4
PZ
1102}
1103
f22f54f4 1104static struct event_constraint *
ca037701 1105intel_bts_constraints(struct perf_event *event)
f22f54f4 1106{
ca037701
PZ
1107 struct hw_perf_event *hwc = &event->hw;
1108 unsigned int hw_event, bts_event;
f22f54f4 1109
18a073a3
PZ
1110 if (event->attr.freq)
1111 return NULL;
1112
ca037701
PZ
1113 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
1114 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
f22f54f4 1115
ca037701 1116 if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
f22f54f4 1117 return &bts_constraint;
ca037701 1118
f22f54f4
PZ
1119 return NULL;
1120}
1121
5a425294 1122static int intel_alt_er(int idx)
b79e8941
PZ
1123{
1124 if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
5a425294 1125 return idx;
b79e8941 1126
5a425294
PZ
1127 if (idx == EXTRA_REG_RSP_0)
1128 return EXTRA_REG_RSP_1;
1129
1130 if (idx == EXTRA_REG_RSP_1)
1131 return EXTRA_REG_RSP_0;
1132
1133 return idx;
1134}
1135
1136static void intel_fixup_er(struct perf_event *event, int idx)
1137{
1138 event->hw.extra_reg.idx = idx;
1139
1140 if (idx == EXTRA_REG_RSP_0) {
b79e8941
PZ
1141 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1142 event->hw.config |= 0x01b7;
b79e8941 1143 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
5a425294
PZ
1144 } else if (idx == EXTRA_REG_RSP_1) {
1145 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1146 event->hw.config |= 0x01bb;
1147 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
b79e8941 1148 }
b79e8941
PZ
1149}
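
/*
 * Example of the two helpers above in action: an offcore event initially
 * encoded as 0x01b7 (OFFCORE_RESPONSE_0) that finds MSR_OFFCORE_RSP_0
 * already claimed with a different config is retried by
 * __intel_shared_reg_get_constraints() via intel_alt_er() on
 * EXTRA_REG_RSP_1; intel_fixup_er() then rewrites the event select to
 * 0x01bb and points hw.extra_reg.reg at MSR_OFFCORE_RSP_1, so the same
 * user-visible event transparently uses the second offcore MSR.
 */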
1150
efc9f05d
SE
1151/*
1152 * manage allocation of shared extra msr for certain events
1153 *
1154 * sharing can be:
1155 * per-cpu: to be shared between the various events on a single PMU
1156 * per-core: per-cpu + shared by HT threads
1157 */
a7e3ed1e 1158static struct event_constraint *
efc9f05d 1159__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
b36817e8
SE
1160 struct perf_event *event,
1161 struct hw_perf_event_extra *reg)
a7e3ed1e 1162{
efc9f05d 1163 struct event_constraint *c = &emptyconstraint;
a7e3ed1e 1164 struct er_account *era;
cd8a38d3 1165 unsigned long flags;
5a425294 1166 int idx = reg->idx;
a7e3ed1e 1167
5a425294
PZ
1168 /*
1169 * reg->alloc can be set due to existing state, so for fake cpuc we
1170 * need to ignore this, otherwise we might fail to allocate proper fake
1171 * state for this extra reg constraint. Also see the comment below.
1172 */
1173 if (reg->alloc && !cpuc->is_fake)
b36817e8 1174 return NULL; /* call x86_get_event_constraint() */
a7e3ed1e 1175
b79e8941 1176again:
5a425294 1177 era = &cpuc->shared_regs->regs[idx];
cd8a38d3
SE
1178 /*
1179 * we use spin_lock_irqsave() to avoid lockdep issues when
1180 * passing a fake cpuc
1181 */
1182 raw_spin_lock_irqsave(&era->lock, flags);
efc9f05d
SE
1183
1184 if (!atomic_read(&era->ref) || era->config == reg->config) {
1185
5a425294
PZ
1186 /*
1187 * If its a fake cpuc -- as per validate_{group,event}() we
1188 * shouldn't touch event state and we can avoid doing so
1189 * since both will only call get_event_constraints() once
1190 * on each event, this avoids the need for reg->alloc.
1191 *
1192 * Not doing the ER fixup will only result in era->reg being
1193 * wrong, but since we won't actually try and program hardware
1194 * this isn't a problem either.
1195 */
1196 if (!cpuc->is_fake) {
1197 if (idx != reg->idx)
1198 intel_fixup_er(event, idx);
1199
1200 /*
1201 * x86_schedule_events() can call get_event_constraints()
1202 * multiple times on events in the case of incremental
1203 * scheduling(). reg->alloc ensures we only do the ER
1204 * allocation once.
1205 */
1206 reg->alloc = 1;
1207 }
1208
efc9f05d
SE
1209 /* lock in msr value */
1210 era->config = reg->config;
1211 era->reg = reg->reg;
1212
1213 /* one more user */
1214 atomic_inc(&era->ref);
1215
a7e3ed1e 1216 /*
b36817e8
SE
1217 * need to call x86_get_event_constraint()
1218 * to check if associated event has constraints
a7e3ed1e 1219 */
b36817e8 1220 c = NULL;
5a425294
PZ
1221 } else {
1222 idx = intel_alt_er(idx);
1223 if (idx != reg->idx) {
1224 raw_spin_unlock_irqrestore(&era->lock, flags);
1225 goto again;
1226 }
a7e3ed1e 1227 }
cd8a38d3 1228 raw_spin_unlock_irqrestore(&era->lock, flags);
a7e3ed1e 1229
efc9f05d
SE
1230 return c;
1231}
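
/*
 * To summarise the allocation protocol above: an er_account is free when
 * its refcount is zero, and may be shared as long as every user programs
 * the same extra-MSR value (era->config == reg->config).  Otherwise the
 * event is bounced to the alternate offcore register via intel_alt_er(),
 * and if that one is taken as well it gets the empty constraint, i.e. it
 * cannot be scheduled right now.
 */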
1232
1233static void
1234__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
1235 struct hw_perf_event_extra *reg)
1236{
1237 struct er_account *era;
1238
1239 /*
5a425294
PZ
1240 * Only put constraint if extra reg was actually allocated. Also takes
1241 * care of event which do not use an extra shared reg.
1242 *
1243 * Also, if this is a fake cpuc we shouldn't touch any event state
1244 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
1245 * either since it'll be thrown out.
efc9f05d 1246 */
5a425294 1247 if (!reg->alloc || cpuc->is_fake)
efc9f05d
SE
1248 return;
1249
1250 era = &cpuc->shared_regs->regs[reg->idx];
1251
1252 /* one fewer user */
1253 atomic_dec(&era->ref);
1254
1255 /* allocate again next time */
1256 reg->alloc = 0;
1257}
1258
1259static struct event_constraint *
1260intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
1261 struct perf_event *event)
1262{
b36817e8
SE
1263 struct event_constraint *c = NULL, *d;
1264 struct hw_perf_event_extra *xreg, *breg;
1265
1266 xreg = &event->hw.extra_reg;
1267 if (xreg->idx != EXTRA_REG_NONE) {
1268 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
1269 if (c == &emptyconstraint)
1270 return c;
1271 }
1272 breg = &event->hw.branch_reg;
1273 if (breg->idx != EXTRA_REG_NONE) {
1274 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
1275 if (d == &emptyconstraint) {
1276 __intel_shared_reg_put_constraints(cpuc, xreg);
1277 c = d;
1278 }
1279 }
efc9f05d 1280 return c;
a7e3ed1e
AK
1281}
1282
de0428a7
KW
1283struct event_constraint *
1284x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1285{
1286 struct event_constraint *c;
1287
1288 if (x86_pmu.event_constraints) {
1289 for_each_event_constraint(c, x86_pmu.event_constraints) {
1290 if ((event->hw.config & c->cmask) == c->code)
1291 return c;
1292 }
1293 }
1294
1295 return &unconstrained;
1296}
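
/*
 * Constraint matching above is a simple masked compare: an event matches
 * a constraint when (hw.config & c->cmask) == c->code.  For instance,
 * INTEL_EVENT_CONSTRAINT() sets cmask to the event-select field only, so
 * every umask variant of that event select inherits the same counter
 * restriction, while INTEL_UEVENT_CONSTRAINT() compares the umask too.
 */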
1297
f22f54f4
PZ
1298static struct event_constraint *
1299intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1300{
1301 struct event_constraint *c;
1302
ca037701
PZ
1303 c = intel_bts_constraints(event);
1304 if (c)
1305 return c;
1306
1307 c = intel_pebs_constraints(event);
f22f54f4
PZ
1308 if (c)
1309 return c;
1310
efc9f05d 1311 c = intel_shared_regs_constraints(cpuc, event);
a7e3ed1e
AK
1312 if (c)
1313 return c;
1314
f22f54f4
PZ
1315 return x86_get_event_constraints(cpuc, event);
1316}
1317
efc9f05d
SE
1318static void
1319intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
a7e3ed1e
AK
1320 struct perf_event *event)
1321{
efc9f05d 1322 struct hw_perf_event_extra *reg;
a7e3ed1e 1323
efc9f05d
SE
1324 reg = &event->hw.extra_reg;
1325 if (reg->idx != EXTRA_REG_NONE)
1326 __intel_shared_reg_put_constraints(cpuc, reg);
b36817e8
SE
1327
1328 reg = &event->hw.branch_reg;
1329 if (reg->idx != EXTRA_REG_NONE)
1330 __intel_shared_reg_put_constraints(cpuc, reg);
efc9f05d 1331}
a7e3ed1e 1332
efc9f05d
SE
1333static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
1334 struct perf_event *event)
1335{
1336 intel_put_shared_regs_event_constraints(cpuc, event);
a7e3ed1e
AK
1337}
1338
0780c927 1339static void intel_pebs_aliases_core2(struct perf_event *event)
b4cdc5c2 1340{
0780c927 1341 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
7639dae0
PZ
1342 /*
1343 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1344 * (0x003c) so that we can use it with PEBS.
1345 *
1346 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1347 * PEBS capable. However we can use INST_RETIRED.ANY_P
1348 * (0x00c0), which is a PEBS capable event, to get the same
1349 * count.
1350 *
1351 * INST_RETIRED.ANY_P counts the number of cycles that retires
1352 * CNTMASK instructions. By setting CNTMASK to a value (16)
1353 * larger than the maximum number of instructions that can be
1354 * retired per cycle (4) and then inverting the condition, we
1355 * count all cycles that retire 16 or less instructions, which
1356 * is every cycle.
1357 *
1358 * Thereby we gain a PEBS capable cycle counter.
1359 */
f9b4eeb8
PZ
1360 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
1361
0780c927
PZ
1362 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1363 event->hw.config = alt_config;
1364 }
1365}
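
/*
 * Concretely, the Core2 alias above turns a plain 0x003c cycles event into
 * X86_CONFIG(.event=0xc0, .inv=1, .cmask=16), i.e. raw config 0x108000c0:
 * INST_RETIRED.ANY_P with the invert bit set and a counter mask of 16,
 * which, as the comment above explains, ends up counting every cycle but
 * is PEBS capable.
 */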
1366
1367static void intel_pebs_aliases_snb(struct perf_event *event)
1368{
1369 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1370 /*
1371 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1372 * (0x003c) so that we can use it with PEBS.
1373 *
1374 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1375 * PEBS capable. However we can use UOPS_RETIRED.ALL
1376 * (0x01c2), which is a PEBS capable event, to get the same
1377 * count.
1378 *
1379 * UOPS_RETIRED.ALL counts the number of cycles that retires
1380 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
1381 * larger than the maximum number of micro-ops that can be
1382 * retired per cycle (4) and then inverting the condition, we
1383 * count all cycles that retire 16 or less micro-ops, which
1384 * is every cycle.
1385 *
1386 * Thereby we gain a PEBS capable cycle counter.
1387 */
1388 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
7639dae0
PZ
1389
1390 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1391 event->hw.config = alt_config;
1392 }
0780c927
PZ
1393}
1394
1395static int intel_pmu_hw_config(struct perf_event *event)
1396{
1397 int ret = x86_pmu_hw_config(event);
1398
1399 if (ret)
1400 return ret;
1401
1402 if (event->attr.precise_ip && x86_pmu.pebs_aliases)
1403 x86_pmu.pebs_aliases(event);
7639dae0 1404
60ce0fbd
SE
1405 if (intel_pmu_needs_lbr_smpl(event)) {
1406 ret = intel_pmu_setup_lbr_filter(event);
1407 if (ret)
1408 return ret;
1409 }
1410
b4cdc5c2
PZ
1411 if (event->attr.type != PERF_TYPE_RAW)
1412 return 0;
1413
1414 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
1415 return 0;
1416
1417 if (x86_pmu.version < 3)
1418 return -EINVAL;
1419
1420 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1421 return -EACCES;
1422
1423 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
1424
1425 return 0;
1426}
1427
144d31e6
GN
1428struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
1429{
1430 if (x86_pmu.guest_get_msrs)
1431 return x86_pmu.guest_get_msrs(nr);
1432 *nr = 0;
1433 return NULL;
1434}
1435EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
1436
1437static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
1438{
1439 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1440 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1441
1442 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
1443 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
1444 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
1445
1446 *nr = 1;
1447 return arr;
1448}
1449
1450static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
1451{
1452 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1453 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1454 int idx;
1455
1456 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1457 struct perf_event *event = cpuc->events[idx];
1458
1459 arr[idx].msr = x86_pmu_config_addr(idx);
1460 arr[idx].host = arr[idx].guest = 0;
1461
1462 if (!test_bit(idx, cpuc->active_mask))
1463 continue;
1464
1465 arr[idx].host = arr[idx].guest =
1466 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
1467
1468 if (event->attr.exclude_host)
1469 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1470 else if (event->attr.exclude_guest)
1471 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1472 }
1473
1474 *nr = x86_pmu.num_counters;
1475 return arr;
1476}
1477
1478static void core_pmu_enable_event(struct perf_event *event)
1479{
1480 if (!event->attr.exclude_host)
1481 x86_pmu_enable_event(event);
1482}
1483
1484static void core_pmu_enable_all(int added)
1485{
1486 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1487 int idx;
1488
1489 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1490 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
1491
1492 if (!test_bit(idx, cpuc->active_mask) ||
1493 cpuc->events[idx]->attr.exclude_host)
1494 continue;
1495
1496 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
1497 }
1498}
1499
641cc938
JO
1500PMU_FORMAT_ATTR(event, "config:0-7" );
1501PMU_FORMAT_ATTR(umask, "config:8-15" );
1502PMU_FORMAT_ATTR(edge, "config:18" );
1503PMU_FORMAT_ATTR(pc, "config:19" );
1504PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
1505PMU_FORMAT_ATTR(inv, "config:23" );
1506PMU_FORMAT_ATTR(cmask, "config:24-31" );
1507
1508static struct attribute *intel_arch_formats_attr[] = {
1509 &format_attr_event.attr,
1510 &format_attr_umask.attr,
1511 &format_attr_edge.attr,
1512 &format_attr_pc.attr,
1513 &format_attr_inv.attr,
1514 &format_attr_cmask.attr,
1515 NULL,
1516};
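
/*
 * These format attributes are exposed under
 * /sys/bus/event_source/devices/cpu/format/ and tell the perf tool how to
 * assemble a raw config from symbolic terms, so that e.g.
 * "perf stat -e cpu/event=0x3c,umask=0x00/" places 0x3c in config bits 0-7
 * and 0x00 in bits 8-15.
 */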
1517
caaa8be3 1518static __initconst const struct x86_pmu core_pmu = {
f22f54f4
PZ
1519 .name = "core",
1520 .handle_irq = x86_pmu_handle_irq,
1521 .disable_all = x86_pmu_disable_all,
144d31e6
GN
1522 .enable_all = core_pmu_enable_all,
1523 .enable = core_pmu_enable_event,
f22f54f4 1524 .disable = x86_pmu_disable_event,
b4cdc5c2 1525 .hw_config = x86_pmu_hw_config,
a072738e 1526 .schedule_events = x86_schedule_events,
f22f54f4
PZ
1527 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
1528 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
1529 .event_map = intel_pmu_event_map,
f22f54f4
PZ
1530 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
1531 .apic = 1,
1532 /*
1533 * Intel PMCs cannot be accessed sanely above 32 bit width,
1534 * so we install an artificial 1<<31 period regardless of
1535 * the generic event period:
1536 */
1537 .max_period = (1ULL << 31) - 1,
1538 .get_event_constraints = intel_get_event_constraints,
a7e3ed1e 1539 .put_event_constraints = intel_put_event_constraints,
f22f54f4 1540 .event_constraints = intel_core_event_constraints,
144d31e6 1541 .guest_get_msrs = core_guest_get_msrs,
641cc938 1542 .format_attrs = intel_arch_formats_attr,
f22f54f4
PZ
1543};
1544
de0428a7 1545struct intel_shared_regs *allocate_shared_regs(int cpu)
efc9f05d
SE
1546{
1547 struct intel_shared_regs *regs;
1548 int i;
1549
1550 regs = kzalloc_node(sizeof(struct intel_shared_regs),
1551 GFP_KERNEL, cpu_to_node(cpu));
1552 if (regs) {
1553 /*
1554 * initialize the locks to keep lockdep happy
1555 */
1556 for (i = 0; i < EXTRA_REG_MAX; i++)
1557 raw_spin_lock_init(&regs->regs[i].lock);
1558
1559 regs->core_id = -1;
1560 }
1561 return regs;
1562}
1563
a7e3ed1e
AK
1564static int intel_pmu_cpu_prepare(int cpu)
1565{
1566 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1567
b36817e8 1568 if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
69092624
LM
1569 return NOTIFY_OK;
1570
efc9f05d
SE
1571 cpuc->shared_regs = allocate_shared_regs(cpu);
1572 if (!cpuc->shared_regs)
a7e3ed1e
AK
1573 return NOTIFY_BAD;
1574
a7e3ed1e
AK
1575 return NOTIFY_OK;
1576}
1577
74846d35
PZ
1578static void intel_pmu_cpu_starting(int cpu)
1579{
a7e3ed1e
AK
1580 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1581 int core_id = topology_core_id(cpu);
1582 int i;
1583
69092624
LM
1584 init_debug_store_on_cpu(cpu);
1585 /*
1586 * Deal with CPUs that don't clear their LBRs on power-up.
1587 */
1588 intel_pmu_lbr_reset();
1589
b36817e8
SE
1590 cpuc->lbr_sel = NULL;
1591
1592 if (!cpuc->shared_regs)
69092624
LM
1593 return;
1594
b36817e8
SE
1595 if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
1596 for_each_cpu(i, topology_thread_cpumask(cpu)) {
1597 struct intel_shared_regs *pc;
a7e3ed1e 1598
b36817e8
SE
1599 pc = per_cpu(cpu_hw_events, i).shared_regs;
1600 if (pc && pc->core_id == core_id) {
1601 cpuc->kfree_on_online = cpuc->shared_regs;
1602 cpuc->shared_regs = pc;
1603 break;
1604 }
a7e3ed1e 1605 }
b36817e8
SE
1606 cpuc->shared_regs->core_id = core_id;
1607 cpuc->shared_regs->refcnt++;
a7e3ed1e
AK
1608 }
1609
b36817e8
SE
1610 if (x86_pmu.lbr_sel_map)
1611 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
74846d35
PZ
1612}
1613
1614static void intel_pmu_cpu_dying(int cpu)
1615{
a7e3ed1e 1616 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
efc9f05d 1617 struct intel_shared_regs *pc;
a7e3ed1e 1618
efc9f05d 1619 pc = cpuc->shared_regs;
a7e3ed1e
AK
1620 if (pc) {
1621 if (pc->core_id == -1 || --pc->refcnt == 0)
1622 kfree(pc);
efc9f05d 1623 cpuc->shared_regs = NULL;
a7e3ed1e
AK
1624 }
1625
74846d35
PZ
1626 fini_debug_store_on_cpu(cpu);
1627}
1628
static void intel_pmu_flush_branch_stack(void)
{
	/*
	 * Intel LBR does not tag entries with the
	 * PID of the current task, so we need to
	 * flush it on ctxsw.
	 * For now, we simply reset it.
	 */
	if (x86_pmu.lbr_nr)
		intel_pmu_lbr_reset();
}
1640
641cc938
JO
1641PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
1642
1643static struct attribute *intel_arch3_formats_attr[] = {
1644 &format_attr_event.attr,
1645 &format_attr_umask.attr,
1646 &format_attr_edge.attr,
1647 &format_attr_pc.attr,
1648 &format_attr_any.attr,
1649 &format_attr_inv.attr,
1650 &format_attr_cmask.attr,
1651
1652 &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
1653 NULL,
1654};
1655
caaa8be3 1656static __initconst const struct x86_pmu intel_pmu = {
f22f54f4
PZ
1657 .name = "Intel",
1658 .handle_irq = intel_pmu_handle_irq,
1659 .disable_all = intel_pmu_disable_all,
1660 .enable_all = intel_pmu_enable_all,
1661 .enable = intel_pmu_enable_event,
1662 .disable = intel_pmu_disable_event,
b4cdc5c2 1663 .hw_config = intel_pmu_hw_config,
a072738e 1664 .schedule_events = x86_schedule_events,
f22f54f4
PZ
1665 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
1666 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
1667 .event_map = intel_pmu_event_map,
f22f54f4
PZ
1668 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
1669 .apic = 1,
1670 /*
1671 * Intel PMCs cannot be accessed sanely above 32 bit width,
1672 * so we install an artificial 1<<31 period regardless of
1673 * the generic event period:
1674 */
1675 .max_period = (1ULL << 31) - 1,
3f6da390 1676 .get_event_constraints = intel_get_event_constraints,
a7e3ed1e 1677 .put_event_constraints = intel_put_event_constraints,
0780c927 1678 .pebs_aliases = intel_pebs_aliases_core2,
3f6da390 1679
641cc938
JO
1680 .format_attrs = intel_arch3_formats_attr,
1681
a7e3ed1e 1682 .cpu_prepare = intel_pmu_cpu_prepare,
74846d35
PZ
1683 .cpu_starting = intel_pmu_cpu_starting,
1684 .cpu_dying = intel_pmu_cpu_dying,
144d31e6 1685 .guest_get_msrs = intel_guest_get_msrs,
d010b332 1686 .flush_branch_stack = intel_pmu_flush_branch_stack,
f22f54f4
PZ
1687};
1688
c1d6f42f 1689static __init void intel_clovertown_quirk(void)
3c44780b
PZ
1690{
1691 /*
1692 * PEBS is unreliable due to:
1693 *
1694 * AJ67 - PEBS may experience CPL leaks
1695 * AJ68 - PEBS PMI may be delayed by one event
1696 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
1697 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
1698 *
1699 * AJ67 could be worked around by restricting the OS/USR flags.
1700 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
1701 *
1702 * AJ106 could possibly be worked around by not allowing LBR
1703 * usage from PEBS, including the fixup.
1704 * AJ68 could possibly be worked around by always programming
ec75a716 1705 * a pebs_event_reset[0] value and coping with the lost events.
3c44780b
PZ
1706 *
1707 * But taken together it might just make sense to not enable PEBS on
1708 * these chips.
1709 */
1710 printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
1711 x86_pmu.pebs = 0;
1712 x86_pmu.pebs_constraints = NULL;
1713}
1714
c1d6f42f 1715static __init void intel_sandybridge_quirk(void)
6a600a8b
PZ
1716{
1717 printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
1718 x86_pmu.pebs = 0;
1719 x86_pmu.pebs_constraints = NULL;
1720}
1721
c1d6f42f
PZ
1722static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
1723 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
1724 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
1725 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
1726 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
1727 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
1728 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
1729 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
ffb871bc
GN
1730};
1731
static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events that cpuid reports as not present */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		printk(KERN_WARNING "CPUID marked event: \'%s\' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}
1743
1744static __init void intel_nehalem_quirk(void)
1745{
1746 union cpuid10_ebx ebx;
1747
1748 ebx.full = x86_pmu.events_maskl;
1749 if (ebx.split.no_branch_misses_retired) {
1750 /*
1751 * Erratum AAJ80 detected, we work it around by using
1752 * the BR_MISP_EXEC.ANY event. This will over-count
1753 * branch-misses, but it's still much better than the
1754 * architectural event which is often completely bogus:
1755 */
1756 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
1757 ebx.split.no_branch_misses_retired = 0;
1758 x86_pmu.events_maskl = ebx.full;
1759 printk(KERN_INFO "CPU erratum AAJ80 worked around\n");
1760 }
1761}
1762
de0428a7 1763__init int intel_pmu_init(void)
f22f54f4
PZ
1764{
1765 union cpuid10_edx edx;
1766 union cpuid10_eax eax;
ffb871bc 1767 union cpuid10_ebx ebx;
a1eac7ac 1768 struct event_constraint *c;
f22f54f4 1769 unsigned int unused;
f22f54f4
PZ
1770 int version;
1771
1772 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
a072738e
CG
1773 switch (boot_cpu_data.x86) {
1774 case 0x6:
1775 return p6_pmu_init();
1776 case 0xf:
1777 return p4_pmu_init();
1778 }
f22f54f4 1779 return -ENODEV;
1780 }
1781
1782 /*
1783 * Check whether the Architectural PerfMon supports
1784 * Branch Misses Retired hw_event or not.
1785 */
1786 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
1787 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
1788 return -ENODEV;
1789
1790 version = eax.split.version_id;
1791 if (version < 2)
1792 x86_pmu = core_pmu;
1793 else
1794 x86_pmu = intel_pmu;
1795
1796 x86_pmu.version = version;
1797 x86_pmu.num_counters = eax.split.num_counters;
1798 x86_pmu.cntval_bits = eax.split.bit_width;
1799 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
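	/* e.g. 48-bit counters give cntval_mask = 0x0000ffffffffffff */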
f22f54f4 1800
1801 x86_pmu.events_maskl = ebx.full;
1802 x86_pmu.events_mask_len = eax.split.mask_length;
1803
1804 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
1805
1806 /*
1807 * Quirk: v2 perfmon does not report fixed-purpose events, so
1808 * assume at least 3 events:
1809 */
1810 if (version > 1)
948b1bb8 1811 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
f22f54f4 1812
1813 /*
1814 * v2 and above have a perf capabilities MSR
1815 */
1816 if (version > 1) {
1817 u64 capabilities;
1818
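		/*
		 * Roughly: bits 5:0 describe the LBR record format, bit 6
		 * PEBS trap behaviour, bit 7 PEBS arch-reg saving, bits 11:8
		 * the PEBS record format and bit 12 freeze-while-SMM support;
		 * the intel_cap union below decodes these fields.
		 */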
1819 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
1820 x86_pmu.intel_cap.capabilities = capabilities;
1821 }
1822
1823 intel_ds_init();
1824
1825 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
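	/*
	 * x86_add_quirk() prepends to the quirk list, so the model specific
	 * quirks added below end up running before this one when the list is
	 * walked after init.
	 */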
1826
1827 /*
1828 * Install the hw-cache-events table:
1829 */
1830 switch (boot_cpu_data.x86_model) {
1831 case 14: /* 65 nm core solo/duo, "Yonah" */
1832 pr_cont("Core events, ");
1833 break;
1834
1835 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
c1d6f42f 1836 x86_add_quirk(intel_clovertown_quirk);
1837 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
1838 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
1839 case 29: /* six-core 45 nm xeon "Dunnington" */
1840 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
1841 sizeof(hw_cache_event_ids));
1842
1843 intel_pmu_lbr_init_core();
1844
f22f54f4 1845 x86_pmu.event_constraints = intel_core2_event_constraints;
17e31629 1846 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
1847 pr_cont("Core2 events, ");
1848 break;
1849
1850 case 26: /* 45 nm nehalem, "Bloomfield" */
1851 case 30: /* 45 nm nehalem, "Lynnfield" */
134fbadf 1852 case 46: /* 45 nm nehalem-ex, "Beckton" */
1853 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
1854 sizeof(hw_cache_event_ids));
1855 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
1856 sizeof(hw_cache_extra_regs));
f22f54f4 1857
1858 intel_pmu_lbr_init_nhm();
1859
f22f54f4 1860 x86_pmu.event_constraints = intel_nehalem_event_constraints;
17e31629 1861 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
11164cd4 1862 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
a7e3ed1e 1863 x86_pmu.extra_regs = intel_nehalem_extra_regs;
ec75a716 1864
91fc4cc0 1865 /* UOPS_ISSUED.STALLED_CYCLES */
1866 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
1867 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
91fc4cc0 1868 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
1869 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
1870 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
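		/*
		 * X86_CONFIG() packs the architectural PERFEVTSEL fields
		 * (event select in bits 0-7, umask in bits 8-15, inv in bit
		 * 23, cmask in bits 24-31); the frontend entry above works
		 * out to the raw config 0x0180010e, for example.
		 */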
94403f88 1871
c1d6f42f 1872 x86_add_quirk(intel_nehalem_quirk);
ec75a716 1873
11164cd4 1874 pr_cont("Nehalem events, ");
f22f54f4 1875 break;
caff2bef 1876
b622d644 1877 case 28: /* Atom */
1878 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
1879 sizeof(hw_cache_event_ids));
1880
1881 intel_pmu_lbr_init_atom();
1882
f22f54f4 1883 x86_pmu.event_constraints = intel_gen_event_constraints;
17e31629 1884 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
1885 pr_cont("Atom events, ");
1886 break;
1887
1888 case 37: /* 32 nm nehalem, "Clarkdale" */
1889 case 44: /* 32 nm nehalem, "Gulftown" */
b2508e82 1890 case 47: /* 32 nm Xeon E7 */
1891 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
1892 sizeof(hw_cache_event_ids));
1893 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
1894 sizeof(hw_cache_extra_regs));
f22f54f4 1895
1896 intel_pmu_lbr_init_nhm();
1897
f22f54f4 1898 x86_pmu.event_constraints = intel_westmere_event_constraints;
40b91cd1 1899 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
17e31629 1900 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
a7e3ed1e 1901 x86_pmu.extra_regs = intel_westmere_extra_regs;
b79e8941 1902 x86_pmu.er_flags |= ERF_HAS_RSP_1;
1903
1904 /* UOPS_ISSUED.STALLED_CYCLES */
1905 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
1906 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
30112039 1907 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
1908 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
1909 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
30112039 1910
1911 pr_cont("Westmere events, ");
1912 break;
b622d644 1913
b06b3d49 1914 case 42: /* SandyBridge */
a34668f6 1915	case 45: /* SandyBridge, "Romley-EP" */
1916 x86_add_quirk(intel_sandybridge_quirk);
1917 case 58: /* IvyBridge */
1918 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
1919 sizeof(hw_cache_event_ids));
1920
c5cc2cd9 1921 intel_pmu_lbr_init_snb();
1922
1923 x86_pmu.event_constraints = intel_snb_event_constraints;
de0428a7 1924 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
0780c927 1925 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
1926 x86_pmu.extra_regs = intel_snb_extra_regs;
1927 /* all extra regs are per-cpu when HT is on */
1928 x86_pmu.er_flags |= ERF_HAS_RSP_1;
1929 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
1930
1931 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
1932 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
1933 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
e04d1b23 1934 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
1935 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
1936 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
e04d1b23 1937
1938 pr_cont("SandyBridge events, ");
1939 break;
1940
f22f54f4 1941 default:
1942 switch (x86_pmu.version) {
1943 case 1:
1944 x86_pmu.event_constraints = intel_v1_event_constraints;
1945 pr_cont("generic architected perfmon v1, ");
1946 break;
1947 default:
1948 /*
1949 * default constraints for v2 and up
1950 */
1951 x86_pmu.event_constraints = intel_gen_event_constraints;
1952 pr_cont("generic architected perfmon, ");
1953 break;
1954 }
f22f54f4 1955 }
ffb871bc 1956
1957 if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
1958 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
1959 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
1960 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
1961 }
1962 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
1963
1964 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
1965 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
1966 x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
1967 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
1968 }
1969
1970 x86_pmu.intel_ctrl |=
1971 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
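	/*
	 * For example, 4 generic plus 3 fixed counters yield
	 * intel_ctrl = 0x70000000f: generic counters in the low bits,
	 * fixed counters starting at bit INTEL_PMC_IDX_FIXED (32).
	 */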
1972
1973 if (x86_pmu.event_constraints) {
1974 /*
1975 * event on fixed counter2 (REF_CYCLES) only works on this
1976 * counter, so do not extend mask to generic counters
1977 */
1978 for_each_event_constraint(c, x86_pmu.event_constraints) {
1979 if (c->cmask != X86_RAW_EVENT_MASK
1980 || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
1981 continue;
1982 }
1983
1984 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
1985 c->weight += x86_pmu.num_counters;
1986 }
1987 }
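	/*
	 * Net effect: fixed-counter constraints such as INST_RETIRED.ANY and
	 * CPU_CLK_UNHALTED.CORE may now also be scheduled on any generic
	 * counter, while REF_CYCLES stays pinned to fixed counter 2.
	 */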
1988
1989 return 0;
1990}