#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Intel PerfMon, used on Core and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};
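
/*
 * Each map entry encodes (unit_mask << 8) | event_select, i.e. the low
 * 16 bits of an IA32_PERFEVTSELx value: 0x412e is event 0x2e with umask
 * 0x41 (LLC misses). A minimal sketch of that layout follows, using a
 * hypothetical helper name that is not part of this file's API:
 */
static inline u64 intel_mk_event_code(u8 event_select, u8 unit_mask)
{
	/* umask in bits 8-15, event select in bits 0-7 */
	return ((u64)unit_mask << 8) | event_select;
}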

static struct event_constraint intel_core_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
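
/*
 * The second argument of INTEL_EVENT_CONSTRAINT() is a bitmask of the
 * general-purpose counters the event may be scheduled on: 0x1 means
 * PMC0 only, 0x2 means PMC1 only, 0x3 means either of PMC0/PMC1.
 * FP_ASSIST above, for example, is restricted to PMC1.
 */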

static struct event_constraint intel_core2_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	EVENT_CONSTRAINT_END
};
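
/*
 * In FIXED_EVENT_CONSTRAINT() masks, bits 0-31 name general-purpose
 * counters and bit 32+n names fixed counter n: (0x3|(1ULL<<32)) lets
 * INSTRUCTIONS_RETIRED run on PMC0, PMC1 or fixed counter 0. The
 * Nehalem/Westmere tables use 0xf instead of 0x3 because those cores
 * have four general-purpose counters.
 */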

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
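
/*
 * A sketch of how this map is consumed (worked example, not code from
 * this file): translating a generic hardware event into a raw EVTSEL
 * value is a single table lookup:
 *
 *	u64 config = intel_pmu_event_map(PERF_COUNT_HW_CACHE_MISSES);
 *	// config == 0x412e; USR/OS/INT bits are or-ed in elsewhere
 */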

static __initconst u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412a, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412a, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};
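
/*
 * Throughout the cache tables above, a value of 0 means the CPU has no
 * event for that op/result combination (the generic code rejects such
 * requests with -ENOENT), while -1 marks combinations that make no
 * architectural sense, e.g. L1I or BPU writes, which are rejected as
 * invalid (-EINVAL).
 */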

static u64 intel_pmu_raw_event(u64 hw_event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(INTEL_ARCH_EVTSEL_MASK |	\
	 INTEL_ARCH_UNIT_MASK   |	\
	 INTEL_ARCH_EDGE_MASK   |	\
	 INTEL_ARCH_INV_MASK    |	\
	 INTEL_ARCH_CNT_MASK)

	return hw_event & CORE_EVNTSEL_MASK;
}
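
/*
 * Worked example (not from the original source): a raw config with
 * cmask, INV, EDGE, umask and event-select bits set keeps all of those
 * fields, while privilege and interrupt bits (USR, OS, INT, EN) are
 * masked off here and re-applied by the generic x86 code:
 *
 *	hw_event = (0x3ULL << 24) | (1 << 23) | (1 << 18) | 0x4f2e;
 *	// intel_pmu_raw_event(hw_event) == hw_event: all bits survive
 *	// intel_pmu_raw_event(hw_event | (1 << 16)) drops the USR bit
 */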

static void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= X86_DEBUGCTL_TR;
	debugctlmsr |= X86_DEBUGCTL_BTS;
	debugctlmsr |= X86_DEBUGCTL_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
		  X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}
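
/*
 * BTS is driven entirely through the DEBUGCTL MSR, as seen above: TR
 * enables branch trace generation, BTS routes the records into the DS
 * save area instead of the bus, and BTINT raises an interrupt when the
 * buffer threshold is reached. The BTS_OFF_OS/BTS_OFF_USR bits suppress
 * tracing per privilege ring, which is how the EVENTSEL_OS/EVENTSEL_USR
 * filters of the event are honoured.
 */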

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();
}

static void intel_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
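
/*
 * The status/ack pair implements the v2 overflow handshake: counters
 * that overflowed set their bit in GLOBAL_STATUS, and writing those
 * same bits back to GLOBAL_OVF_CTRL clears them so that the next PMI
 * can be latched.
 */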

static inline void
intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
}
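
/*
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL packs one 4-bit control nibble per
 * fixed counter: bit 0 = count in ring 0, bit 1 = count in rings > 0,
 * bit 2 = ANY-thread (v3), bit 3 = enable PMI. Clearing the nibble, as
 * done above, stops that counter without disturbing its siblings.
 */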

static void intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64 from;
		u64 to;
		u64 flags;
	};
	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return;

	if (!ds)
		return;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return;

	ds->bts_index = ds->bts_buffer_base;

	data.period	= event->hw.last_period;
	data.addr	= 0;
	data.raw	= NULL;
	regs.ip		= 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event,
			      header.size * (top - at), 1, 1))
		return;

	for (; at < top; at++) {
		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
}
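
/*
 * Each hardware BTS record drained above is three 64-bit words: the
 * branch-from and branch-to linear addresses plus a flags word.
 * ds->bts_index advances from ds->bts_buffer_base as the CPU logs
 * branches, so (top - at) is the number of pending records, each of
 * which is emitted as one perf sample with ip = from and addr = to.
 */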

static inline void
intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_event(hwc, idx);
}

static inline void
intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__get_cpu_var(cpu_hw_events).enabled)
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc, idx);
		return;
	}

	__x86_pmu_enable_event(hwc, idx);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	int ret;

	x86_perf_event_update(event, hwc, idx);
	ret = x86_perf_event_set_period(event, hwc, idx);

	return ret;
}

static void intel_pmu_reset(void)
{
	struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_events)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
	}
	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 ack, status;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

	perf_disable();
	intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		perf_enable();
		return 0;
	}

	loops = 0;
again:
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		perf_enable();
		return 1;
	}

	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			intel_pmu_disable_event(&event->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

	perf_enable();

	return 1;
}

static struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);

static struct event_constraint *
intel_special_constraints(struct perf_event *event)
{
	unsigned int hw_event;

	hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;

	if (unlikely((hw_event ==
		      x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
		     (event->hw.sample_period == 1))) {

		return &bts_constraint;
	}
	return NULL;
}
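
/*
 * The special case above is what transparently redirects a
 * branch-instructions event with sample_period == 1 onto the BTS
 * pseudo-counter: sampling every single branch is exactly what Branch
 * Trace Store provides in hardware, and using it frees up a
 * general-purpose counter.
 */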

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_special_constraints(event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}

static __initconst struct x86_pmu core_pmu = {
	.name			= "core",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.raw_event		= intel_pmu_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.event_constraints	= intel_core_event_constraints,
};

static __initconst struct x86_pmu intel_pmu = {
	.name			= "Intel",
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.raw_event		= intel_pmu_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.enable_bts		= intel_pmu_enable_bts,
	.disable_bts		= intel_pmu_disable_bts,
	.get_event_constraints	= intel_get_event_constraints
};

static __init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		/* check for P6 processor family */
		if (boot_cpu_data.x86 == 6) {
			return p6_pmu_init();
		} else {
			return -ENODEV;
		}
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * the Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version		= version;
	x86_pmu.num_events	= eax.split.num_events;
	x86_pmu.event_bits	= eax.split.bit_width;
	x86_pmu.event_mask	= (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_core2_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		pr_cont("Nehalem/Corei7 events, ");
		break;

	case 28: /* Atom */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		pr_cont("Westmere events, ");
		break;

	default:
		/*
		 * default constraints for v2 and up
		 */
		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("generic architected perfmon, ");
	}
	return 0;
}
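
/*
 * For reference, a minimal sketch of a userspace consumer of this PMU
 * (illustrative only, error handling elided):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.size	= sizeof(attr),
 *		.config	= PERF_COUNT_HW_CACHE_MISSES,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	u64 count;
 *	read(fd, &count, sizeof(count));	// LLC misses (event 0x412e)
 */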

#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */