/*
 * perf_event_intel_cstate.c: support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 */

/*
 * This file exports cstate-related free-running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events, and tools can use them without special MSR access
 * code.
 *
 * The events only support system-wide counting. There is no sampling
 * support because the hardware does not support it.
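 *
 * For example, counting C6 core residency system-wide with perf
 * (illustrative usage; the event is only present when the CPU model
 * supports it):
 *
 *   perf stat -a -e cstate_core/c6-residency/ -- sleep 1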
 *
 * According to the counters' scope and category, two PMUs are
 * registered with the perf_event core subsystem:
 * - 'cstate_core': The counters are available for each physical core.
 *   The counters include CORE_C*_RESIDENCY.
 * - 'cstate_pkg': The counters are available for each physical package.
 *   The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
 *
 * Model specific counters:
 *   MSR_CORE_C1_RES: CORE C1 Residency Counter
 *                    perf code: 0x00
 *                    Available model: SLM,AMT
 *                    Scope: Core (each processor core has an MSR)
 *   MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *                    perf code: 0x01
 *                    Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *                    Scope: Core
 *   MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *                    perf code: 0x02
 *                    Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *                    Scope: Core
 *   MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *                    perf code: 0x03
 *                    Available model: SNB,IVB,HSW,BDW,SKL
 *                    Scope: Core
 *   MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
 *                    perf code: 0x00
 *                    Available model: SNB,IVB,HSW,BDW,SKL
 *                    Scope: Package (physical package)
 *   MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
 *                    perf code: 0x01
 *                    Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *                    Scope: Package (physical package)
 *   MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
 *                    perf code: 0x02
 *                    Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *                    Scope: Package (physical package)
 *   MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
 *                    perf code: 0x03
 *                    Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *                    Scope: Package (physical package)
 *   MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
 *                    perf code: 0x04
 *                    Available model: HSW ULT only
 *                    Scope: Package (physical package)
 *   MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
 *                    perf code: 0x05
 *                    Available model: HSW ULT only
 *                    Scope: Package (physical package)
 *   MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *                    perf code: 0x06
 *                    Available model: HSW ULT only
 *                    Scope: Package (physical package)
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include "perf_event.h"

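/*
 * The "format" sysfs attribute describes how perf_event_attr::config is
 * interpreted for these PMUs (the whole 64 bits select the event), so
 * tools such as perf can parse event strings like
 * "cstate_core/c6-residency/".
 */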
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)             \
static ssize_t __cstate_##_var##_show(struct kobject *kobj,         \
                                      struct kobj_attribute *attr,  \
                                      char *page)                   \
{                                                                   \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                 \
        return sprintf(page, _format "\n");                         \
}                                                                   \
static struct kobj_attribute format_attr_##_var =                   \
        __ATTR(_name, 0444, __cstate_##_var##_show, NULL)

static ssize_t cstate_get_attr_cpumask(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf);

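/*
 * One entry per cstate event: the backing MSR, the sysfs event attribute
 * (cleared at probe time when the event is unavailable), and a callback
 * that tests whether the boot CPU model supports the event.
 */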
struct perf_cstate_msr {
        u64     msr;
        struct  perf_pmu_events_attr *attr;
        bool    (*test)(int idx);
};


/* cstate_core PMU */

static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_id {
        /*
         * cstate_core events
         */
        PERF_CSTATE_CORE_C1_RES = 0,
        PERF_CSTATE_CORE_C3_RES,
        PERF_CSTATE_CORE_C6_RES,
        PERF_CSTATE_CORE_C7_RES,

        PERF_CSTATE_CORE_EVENT_MAX,
};

static bool test_core(int idx)
{
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
            boot_cpu_data.x86 != 6)
                return false;

        switch (boot_cpu_data.x86_model) {
        case 30: /* 45nm Nehalem    */
        case 26: /* 45nm Nehalem-EP */
        case 46: /* 45nm Nehalem-EX */

        case 37: /* 32nm Westmere    */
        case 44: /* 32nm Westmere-EP */
        case 47: /* 32nm Westmere-EX */
                if (idx == PERF_CSTATE_CORE_C3_RES ||
                    idx == PERF_CSTATE_CORE_C6_RES)
                        return true;
                break;
        case 42: /* 32nm SandyBridge         */
        case 45: /* 32nm SandyBridge-E/EN/EP */

        case 58: /* 22nm IvyBridge       */
        case 62: /* 22nm IvyBridge-EP/EX */

        case 60: /* 22nm Haswell Core   */
        case 63: /* 22nm Haswell Server */
        case 69: /* 22nm Haswell ULT    */
        case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */

        case 61: /* 14nm Broadwell Core-M */
        case 86: /* 14nm Broadwell Xeon D */
        case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
        case 79: /* 14nm Broadwell Server */

        case 78: /* 14nm Skylake Mobile  */
        case 94: /* 14nm Skylake Desktop */
                if (idx == PERF_CSTATE_CORE_C3_RES ||
                    idx == PERF_CSTATE_CORE_C6_RES ||
                    idx == PERF_CSTATE_CORE_C7_RES)
                        return true;
                break;
        case 55: /* 22nm Atom "Silvermont"                */
        case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
        case 76: /* 14nm Atom "Airmont"                   */
                if (idx == PERF_CSTATE_CORE_C1_RES ||
                    idx == PERF_CSTATE_CORE_C6_RES)
                        return true;
                break;
        }

        return false;
}

PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");

static struct perf_cstate_msr core_msr[] = {
        [PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,       &evattr_cstate_core_c1, test_core, },
        [PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY, &evattr_cstate_core_c3, test_core, },
        [PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY, &evattr_cstate_core_c6, test_core, },
        [PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY, &evattr_cstate_core_c7, test_core, },
};

static struct attribute *core_events_attrs[PERF_CSTATE_CORE_EVENT_MAX + 1] = {
        NULL,
};

static struct attribute_group core_events_attr_group = {
        .name = "events",
        .attrs = core_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
        &format_attr_core_event.attr,
        NULL,
};

static struct attribute_group core_format_attr_group = {
        .name = "format",
        .attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group cpumask_attr_group = {
        .attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
        &core_events_attr_group,
        &core_format_attr_group,
        &cpumask_attr_group,
        NULL,
};

/* cstate_core PMU end */


/* cstate_pkg PMU */

static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_id {
        /*
         * cstate_pkg events
         */
        PERF_CSTATE_PKG_C2_RES = 0,
        PERF_CSTATE_PKG_C3_RES,
        PERF_CSTATE_PKG_C6_RES,
        PERF_CSTATE_PKG_C7_RES,
        PERF_CSTATE_PKG_C8_RES,
        PERF_CSTATE_PKG_C9_RES,
        PERF_CSTATE_PKG_C10_RES,

        PERF_CSTATE_PKG_EVENT_MAX,
};

static bool test_pkg(int idx)
{
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
            boot_cpu_data.x86 != 6)
                return false;

        switch (boot_cpu_data.x86_model) {
        case 30: /* 45nm Nehalem    */
        case 26: /* 45nm Nehalem-EP */
        case 46: /* 45nm Nehalem-EX */

        case 37: /* 32nm Westmere    */
        case 44: /* 32nm Westmere-EP */
        case 47: /* 32nm Westmere-EX */
                if (idx == PERF_CSTATE_PKG_C3_RES ||
                    idx == PERF_CSTATE_PKG_C6_RES ||
                    idx == PERF_CSTATE_PKG_C7_RES)
                        return true;
                break;
        case 42: /* 32nm SandyBridge         */
        case 45: /* 32nm SandyBridge-E/EN/EP */

        case 58: /* 22nm IvyBridge       */
        case 62: /* 22nm IvyBridge-EP/EX */

        case 60: /* 22nm Haswell Core   */
        case 63: /* 22nm Haswell Server */
        case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */

        case 61: /* 14nm Broadwell Core-M */
        case 86: /* 14nm Broadwell Xeon D */
        case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
        case 79: /* 14nm Broadwell Server */

        case 78: /* 14nm Skylake Mobile  */
        case 94: /* 14nm Skylake Desktop */
                if (idx == PERF_CSTATE_PKG_C2_RES ||
                    idx == PERF_CSTATE_PKG_C3_RES ||
                    idx == PERF_CSTATE_PKG_C6_RES ||
                    idx == PERF_CSTATE_PKG_C7_RES)
                        return true;
                break;
        case 55: /* 22nm Atom "Silvermont"                */
        case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
        case 76: /* 14nm Atom "Airmont"                   */
                if (idx == PERF_CSTATE_PKG_C6_RES)
                        return true;
                break;
        case 69: /* 22nm Haswell ULT */
                if (idx == PERF_CSTATE_PKG_C2_RES ||
                    idx == PERF_CSTATE_PKG_C3_RES ||
                    idx == PERF_CSTATE_PKG_C6_RES ||
                    idx == PERF_CSTATE_PKG_C7_RES ||
                    idx == PERF_CSTATE_PKG_C8_RES ||
                    idx == PERF_CSTATE_PKG_C9_RES ||
                    idx == PERF_CSTATE_PKG_C10_RES)
                        return true;
                break;
        }

        return false;
}

PMU_EVENT_ATTR_STRING(c2-residency,  evattr_cstate_pkg_c2,  "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency,  evattr_cstate_pkg_c3,  "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency,  evattr_cstate_pkg_c6,  "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency,  evattr_cstate_pkg_c7,  "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency,  evattr_cstate_pkg_c8,  "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency,  evattr_cstate_pkg_c9,  "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");

static struct perf_cstate_msr pkg_msr[] = {
        [PERF_CSTATE_PKG_C2_RES]  = { MSR_PKG_C2_RESIDENCY,  &evattr_cstate_pkg_c2,  test_pkg, },
        [PERF_CSTATE_PKG_C3_RES]  = { MSR_PKG_C3_RESIDENCY,  &evattr_cstate_pkg_c3,  test_pkg, },
        [PERF_CSTATE_PKG_C6_RES]  = { MSR_PKG_C6_RESIDENCY,  &evattr_cstate_pkg_c6,  test_pkg, },
        [PERF_CSTATE_PKG_C7_RES]  = { MSR_PKG_C7_RESIDENCY,  &evattr_cstate_pkg_c7,  test_pkg, },
        [PERF_CSTATE_PKG_C8_RES]  = { MSR_PKG_C8_RESIDENCY,  &evattr_cstate_pkg_c8,  test_pkg, },
        [PERF_CSTATE_PKG_C9_RES]  = { MSR_PKG_C9_RESIDENCY,  &evattr_cstate_pkg_c9,  test_pkg, },
        [PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &evattr_cstate_pkg_c10, test_pkg, },
};

static struct attribute *pkg_events_attrs[PERF_CSTATE_PKG_EVENT_MAX + 1] = {
        NULL,
};

static struct attribute_group pkg_events_attr_group = {
        .name = "events",
        .attrs = pkg_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
        &format_attr_pkg_event.attr,
        NULL,
};

static struct attribute_group pkg_format_attr_group = {
        .name = "format",
        .attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
        &pkg_events_attr_group,
        &pkg_format_attr_group,
        &cpumask_attr_group,
        NULL,
};

/* cstate_pkg PMU end */

static ssize_t cstate_get_attr_cpumask(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct pmu *pmu = dev_get_drvdata(dev);

        if (pmu == &cstate_core_pmu)
                return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
        else if (pmu == &cstate_pkg_pmu)
                return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
        else
                return 0;
}

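/*
 * Validate and set up an event: accept only this PMU's event type, reject
 * sampling and all privilege filters (the counters are free running and
 * cannot be filtered), then map the config value to the backing MSR.
 */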
static int cstate_pmu_event_init(struct perf_event *event)
{
        u64 cfg = event->attr.config;
        int ret = 0;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* unsupported modes and filters */
        if (event->attr.exclude_user   ||
            event->attr.exclude_kernel ||
            event->attr.exclude_hv     ||
            event->attr.exclude_idle   ||
            event->attr.exclude_host   ||
            event->attr.exclude_guest  ||
            event->attr.sample_period) /* no sampling */
                return -EINVAL;

        if (event->pmu == &cstate_core_pmu) {
                if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
                        return -EINVAL;
                if (!core_msr[cfg].attr)
                        return -EINVAL;
                event->hw.event_base = core_msr[cfg].msr;
        } else if (event->pmu == &cstate_pkg_pmu) {
                if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
                        return -EINVAL;
                if (!pkg_msr[cfg].attr)
                        return -EINVAL;
                event->hw.event_base = pkg_msr[cfg].msr;
        } else {
                return -ENOENT;
        }

        /* must be done before validate_group */
        event->hw.config = cfg;
        event->hw.idx = -1;

        return ret;
}

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
        u64 val;

        rdmsrl(event->hw.event_base, val);
        return val;
}

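/*
 * Fold the delta since the last read into event->count. The cmpxchg loop
 * copes with several contexts reading the same free-running MSR
 * concurrently: if another reader updates prev_count in the meantime, the
 * delta is recomputed against the fresh value.
 */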
static void cstate_pmu_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev_raw_count, new_raw_count;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = cstate_pmu_read_counter(event);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        local64_add(new_raw_count - prev_raw_count, &event->count);
}

static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
        local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
        cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
        cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
        if (mode & PERF_EF_START)
                cstate_pmu_event_start(event, mode);

        return 0;
}

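/*
 * A CPU is going offline. If it was the designated reader for its core or
 * package, hand the role (and any active events) over to another online
 * CPU in the same domain, if one exists.
 */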
static void cstate_cpu_exit(int cpu)
{
        int i, id, target;

        /* cpu exit for cstate core */
        if (has_cstate_core) {
                id = topology_core_id(cpu);
                target = -1;

                for_each_online_cpu(i) {
                        if (i == cpu)
                                continue;
                        if (id == topology_core_id(i)) {
                                target = i;
                                break;
                        }
                }
                if (cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask) && target >= 0)
                        cpumask_set_cpu(target, &cstate_core_cpu_mask);
                WARN_ON(cpumask_empty(&cstate_core_cpu_mask));
                if (target >= 0)
                        perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
        }

        /* cpu exit for cstate pkg */
        if (has_cstate_pkg) {
                id = topology_physical_package_id(cpu);
                target = -1;

                for_each_online_cpu(i) {
                        if (i == cpu)
                                continue;
                        if (id == topology_physical_package_id(i)) {
                                target = i;
                                break;
                        }
                }
                if (cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask) && target >= 0)
                        cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
                WARN_ON(cpumask_empty(&cstate_pkg_cpu_mask));
                if (target >= 0)
                        perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
        }
}

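/*
 * A CPU is coming online. If no CPU in its core/package owns the reader
 * role yet, this CPU takes it.
 */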
static void cstate_cpu_init(int cpu)
{
        int i, id;

        /* cpu init for cstate core */
        if (has_cstate_core) {
                id = topology_core_id(cpu);
                for_each_cpu(i, &cstate_core_cpu_mask) {
                        if (id == topology_core_id(i))
                                break;
                }
                if (i >= nr_cpu_ids)
                        cpumask_set_cpu(cpu, &cstate_core_cpu_mask);
        }

        /* cpu init for cstate pkg */
        if (has_cstate_pkg) {
                id = topology_physical_package_id(cpu);
                for_each_cpu(i, &cstate_pkg_cpu_mask) {
                        if (id == topology_physical_package_id(i))
                                break;
                }
                if (i >= nr_cpu_ids)
                        cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
        }
}

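/*
 * CPU hotplug callback: claim the reader role in CPU_STARTING and hand it
 * off in CPU_DOWN_PREPARE, before the CPU actually goes away.
 */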
static int cstate_cpu_notifier(struct notifier_block *self,
                               unsigned long action, void *hcpu)
{
        unsigned int cpu = (long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                break;
        case CPU_STARTING:
                cstate_cpu_init(cpu);
                break;
        case CPU_UP_CANCELED:
        case CPU_DYING:
                break;
        case CPU_ONLINE:
        case CPU_DEAD:
                break;
        case CPU_DOWN_PREPARE:
                cstate_cpu_exit(cpu);
                break;
        default:
                break;
        }

        return NOTIFY_OK;
}

/*
 * Probe the cstate events and insert the available ones into the sysfs attrs.
 * Return false if no events are available.
 */
static bool cstate_probe_msr(struct perf_cstate_msr *msr,
                             struct attribute **events_attrs,
                             int max_event_nr)
{
        int i, j = 0;
        u64 val;

        /* Probe the cstate events. */
        for (i = 0; i < max_event_nr; i++) {
                if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
                        msr[i].attr = NULL;
        }

        /* List the remaining events in the sysfs attrs. */
        for (i = 0; i < max_event_nr; i++) {
                if (msr[i].attr)
                        events_attrs[j++] = &msr[i].attr->attr.attr;
        }
        events_attrs[j] = NULL;

        return j > 0;
}

static int __init cstate_init(void)
{
        /* SLM/AMT have a different MSR for PKG C6 */
        switch (boot_cpu_data.x86_model) {
        case 55:
        case 76:
        case 77:
                pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
        }

        if (cstate_probe_msr(core_msr, core_events_attrs, PERF_CSTATE_CORE_EVENT_MAX))
                has_cstate_core = true;

        if (cstate_probe_msr(pkg_msr, pkg_events_attrs, PERF_CSTATE_PKG_EVENT_MAX))
                has_cstate_pkg = true;

        return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}

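/*
 * Seed the reader cpumasks from the CPUs that are already online, then
 * register the hotplug notifier to keep them up to date.
 */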
static void __init cstate_cpumask_init(void)
{
        int cpu;

        cpu_notifier_register_begin();

        for_each_online_cpu(cpu)
                cstate_cpu_init(cpu);

        __perf_cpu_notifier(cstate_cpu_notifier);

        cpu_notifier_register_done();
}

static struct pmu cstate_core_pmu = {
        .attr_groups    = core_attr_groups,
        .name           = "cstate_core",
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = cstate_pmu_event_init,
        .add            = cstate_pmu_event_add, /* must have */
        .del            = cstate_pmu_event_del, /* must have */
        .start          = cstate_pmu_event_start,
        .stop           = cstate_pmu_event_stop,
        .read           = cstate_pmu_event_update,
        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
};

static struct pmu cstate_pkg_pmu = {
        .attr_groups    = pkg_attr_groups,
        .name           = "cstate_pkg",
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = cstate_pmu_event_init,
        .add            = cstate_pmu_event_add, /* must have */
        .del            = cstate_pmu_event_del, /* must have */
        .start          = cstate_pmu_event_start,
        .stop           = cstate_pmu_event_stop,
        .read           = cstate_pmu_event_update,
        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
};

static void __init cstate_pmus_register(void)
{
        int err;

        if (has_cstate_core) {
                err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
                if (WARN_ON(err))
                        pr_info("Failed to register PMU %s error %d\n",
                                cstate_core_pmu.name, err);
        }

        if (has_cstate_pkg) {
                err = perf_pmu_register(&cstate_pkg_pmu, cstate_pkg_pmu.name, -1);
                if (WARN_ON(err))
                        pr_info("Failed to register PMU %s error %d\n",
                                cstate_pkg_pmu.name, err);
        }
}

static int __init cstate_pmu_init(void)
{
        int err;

        /* The cstate MSRs are typically not available under a hypervisor. */
        if (cpu_has_hypervisor)
                return -ENODEV;

        err = cstate_init();
        if (err)
                return err;

        cstate_cpumask_init();

        cstate_pmus_register();

        return 0;
}

device_initcall(cstate_pmu_init);