/*
 * perf_event_intel_rapl.c: support Intel RAPL energy consumption counters
 * Copyright (C) 2013 Google, Inc., Stephane Eranian
 *
 * Intel RAPL interface is specified in the IA-32 Manual Vol3b
 * section 14.7.1 (September 2013)
 *
 * RAPL provides more controls than just reporting energy consumption,
 * however here we only expose the free running energy consumption
 * counters (pp0, pkg, dram, gpu, psys).
 *
 * Each of those counters increments in a power unit defined by the
 * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules.
 *
 * Counter to rapl events mappings:
 *
 *  pp0 counter: consumption of all physical cores (power plane 0)
 *	  event: rapl_energy_cores
 *
 *  pkg counter: consumption of the whole processor package
 *	  event: rapl_energy_pkg
 *
 *  dram counter: consumption of the dram domain (servers only)
 *	  event: rapl_energy_dram
 *
 *  gpu counter: consumption of the built-in gpu domain (clients only)
 *	  event: rapl_energy_gpu
 *
 *  psys counter: consumption of the built-in psys domain (clients only)
 *	  event: rapl_energy_psys
 *
 * We manage those counters as free running (read-only). They may be
 * used simultaneously by other tools, such as turbostat.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it does not make sense and is not
 * supported by the RAPL hardware.
 *
 * Because we want to avoid floating-point operations in the kernel,
 * the events are all reported in fixed point arithmetic (32.32).
 * Tools must convert the counts to Joules, e.g. with
 * ldexp(raw_count, -32), and divide by the measurement duration
 * to obtain Watts.
 */
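/*
 * Illustrative user-space sketch (not part of this driver): one way a tool
 * might read the package energy counter through perf and convert the 32.32
 * fixed point count to Joules. The helper name and error-free flow are
 * assumptions; the PMU type number must be read from
 * /sys/bus/event_source/devices/power/type at runtime, and config 0x02
 * corresponds to rapl_energy_pkg as defined below.
 *
 *	#include <math.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *
 *	static double read_pkg_joules(int pmu_type, int cpu)
 *	{
 *		struct perf_event_attr attr = {
 *			.type	= pmu_type,	// from the sysfs "power" PMU
 *			.config	= 0x02,		// rapl_energy_pkg
 *			.size	= sizeof(attr),
 *		};
 *		// system-wide counting: pid == -1, one cpu of the package
 *		int fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
 *		uint64_t count;
 *
 *		read(fd, &count, sizeof(count));
 *		close(fd);
 *		return ldexp((double)count, -32);	// 2^-32 Joules per unit
 *	}
 */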
#define pr_fmt(fmt) "RAPL PMU: " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include "../perf_event.h"

MODULE_LICENSE("GPL");
/*
 * RAPL energy status counters
 */
#define RAPL_IDX_PP0_NRG_STAT	0	/* all cores */
#define INTEL_RAPL_PP0		0x1	/* pseudo-encoding */
#define RAPL_IDX_PKG_NRG_STAT	1	/* entire package */
#define INTEL_RAPL_PKG		0x2	/* pseudo-encoding */
#define RAPL_IDX_RAM_NRG_STAT	2	/* DRAM */
#define INTEL_RAPL_RAM		0x3	/* pseudo-encoding */
#define RAPL_IDX_PP1_NRG_STAT	3	/* gpu */
#define INTEL_RAPL_PP1		0x4	/* pseudo-encoding */
#define RAPL_IDX_PSYS_NRG_STAT	4	/* psys */
#define INTEL_RAPL_PSYS		0x5	/* pseudo-encoding */

#define NR_RAPL_DOMAINS		0x5
static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
/* Clients have PP0, PKG, PP1 */
#define RAPL_IDX_CLN	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_PP1_NRG_STAT)

/* Servers have PP0, PKG, RAM */
#define RAPL_IDX_SRV	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT)

/* Haswell parts have PP0, PKG, RAM, PP1 */
#define RAPL_IDX_HSW	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT|\
			 1<<RAPL_IDX_PP1_NRG_STAT)

/* SKL clients have PP0, PKG, RAM, PP1, PSYS */
#define RAPL_IDX_SKL_CLN	(1<<RAPL_IDX_PP0_NRG_STAT|\
				 1<<RAPL_IDX_PKG_NRG_STAT|\
				 1<<RAPL_IDX_RAM_NRG_STAT|\
				 1<<RAPL_IDX_PP1_NRG_STAT|\
				 1<<RAPL_IDX_PSYS_NRG_STAT)

/* Knights Landing has PKG, RAM */
#define RAPL_IDX_KNL	(1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT)
/*
 * event code: LSB 8 bits, passed in attr->config
 * any other bit is reserved
 */
#define RAPL_EVENT_MASK	0xFFULL
#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __rapl_##_var##_show(struct kobject *kobj,	\
				    struct kobj_attribute *attr,\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
	return sprintf(page, _format "\n");			\
static struct kobj_attribute format_attr_##_var =		\
	__ATTR(_name, 0444, __rapl_##_var##_show, NULL)
#define RAPL_CNTR_WIDTH 32

#define RAPL_EVENT_ATTR_STR(_name, v, str)				\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL),	\
	struct list_head	active_list;
	ktime_t			timer_interval;
	struct hrtimer		hrtimer;

	struct rapl_pmu		*pmus[];
/* 1/2^hw_unit Joule */
static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
static struct rapl_pmus *rapl_pmus;
static cpumask_t rapl_cpu_mask;
static unsigned int rapl_cntr_mask;
static u64 rapl_timer_ms;
static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
	return rapl_pmus->pmus[topology_logical_package_id(cpu)];
static inline u64 rapl_read_counter(struct perf_event *event)
	rdmsrl(event->hw.event_base, raw);
static inline u64 rapl_scale(u64 v, int cfg)
	if (cfg > NR_RAPL_DOMAINS) {
		pr_warn("Invalid domain %d, failed to scale data\n", cfg);
	/*
	 * scale delta to smallest unit (1/2^32)
	 * users must then scale back: count * 2^-32 to get Joules,
	 * or use ldexp(count, -32).
	 * Watts = Joules / time delta
	 */
	return v << (32 - rapl_hw_unit[cfg - 1]);
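/*
 * Worked example for the scaling above (assuming a SandyBridge-style unit
 * of 1/2^16 J, i.e. rapl_hw_unit[cfg - 1] == 16): a raw delta of 3 counter
 * ticks is 3 * 2^-16 J. rapl_scale() shifts it left by 32 - 16 = 16 bits,
 * giving 196608, and 196608 * 2^-32 J equals 3 * 2^-16 J again. Every
 * domain is therefore reported in the same 2^-32 J base unit regardless of
 * the unit its MSR actually uses.
 */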
static u64 rapl_event_update(struct perf_event *event)
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	int shift = RAPL_CNTR_WIDTH;

	prev_raw_count = local64_read(&hwc->prev_count);
	rdmsrl(event->hw.event_base, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count) {

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the register.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);

	sdelta = rapl_scale(delta, event->hw.config);

	local64_add(sdelta, &event->count);

	return new_raw_count;
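/*
 * Illustrative arithmetic for the paired shifts above (the concrete values
 * are an example, not from the original source): the RAPL status MSRs are
 * 32 bits wide, so both counts are shifted into the upper half of a 64-bit
 * word before subtracting. If the hardware counter wrapped, e.g.
 * prev_raw_count = 0xFFFFFFF0 and new_raw_count = 0x00000010, the
 * subtraction of the shifted values still yields 0x20 << 32, i.e. a delta
 * of 32 counts, without any explicit wrap handling.
 */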
static void rapl_start_hrtimer(struct rapl_pmu *pmu)
	hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
		      HRTIMER_MODE_REL_PINNED);
static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
	struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
	struct perf_event *event;

		return HRTIMER_NORESTART;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	list_for_each_entry(event, &pmu->active_list, active_entry)
		rapl_event_update(event);

	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	hrtimer_forward_now(hrtimer, pmu->timer_interval);

	return HRTIMER_RESTART;
static void rapl_hrtimer_init(struct rapl_pmu *pmu)
	struct hrtimer *hr = &pmu->hrtimer;

	hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hr->function = rapl_hrtimer_handle;
static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
				   struct perf_event *event)
	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))

	list_add_tail(&event->active_entry, &pmu->active_list);

	local64_set(&event->hw.prev_count, rapl_read_counter(event));

	if (pmu->n_active == 1)
		rapl_start_hrtimer(pmu);
static void rapl_pmu_event_start(struct perf_event *event, int mode)
	struct rapl_pmu *pmu = event->pmu_private;

	raw_spin_lock_irqsave(&pmu->lock, flags);
	__rapl_pmu_event_start(pmu, event);
	raw_spin_unlock_irqrestore(&pmu->lock, flags);
static void rapl_pmu_event_stop(struct perf_event *event, int mode)
	struct rapl_pmu *pmu = event->pmu_private;
	struct hw_perf_event *hwc = &event->hw;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	/* mark event as deactivated and stopped */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		WARN_ON_ONCE(pmu->n_active <= 0);
		if (pmu->n_active == 0)
			hrtimer_cancel(&pmu->hrtimer);

		list_del(&event->active_entry);

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

	/* check if update of sw counter is necessary */
	if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		rapl_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;

	raw_spin_unlock_irqrestore(&pmu->lock, flags);
static int rapl_pmu_event_add(struct perf_event *event, int mode)
	struct rapl_pmu *pmu = event->pmu_private;
	struct hw_perf_event *hwc = &event->hw;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (mode & PERF_EF_START)
		__rapl_pmu_event_start(pmu, event);

	raw_spin_unlock_irqrestore(&pmu->lock, flags);
static void rapl_pmu_event_del(struct perf_event *event, int flags)
	rapl_pmu_event_stop(event, PERF_EF_UPDATE);
static int rapl_pmu_event_init(struct perf_event *event)
	u64 cfg = event->attr.config & RAPL_EVENT_MASK;
	int bit, msr, ret = 0;
	struct rapl_pmu *pmu;

	/* only look at RAPL events */
	if (event->attr.type != rapl_pmus->pmu.type)

	/* check only supported bits are set */
	if (event->attr.config & ~RAPL_EVENT_MASK)

	/*
	 * check event is known (determines counter)
	 */
		bit = RAPL_IDX_PP0_NRG_STAT;
		msr = MSR_PP0_ENERGY_STATUS;
		bit = RAPL_IDX_PKG_NRG_STAT;
		msr = MSR_PKG_ENERGY_STATUS;
		bit = RAPL_IDX_RAM_NRG_STAT;
		msr = MSR_DRAM_ENERGY_STATUS;
		bit = RAPL_IDX_PP1_NRG_STAT;
		msr = MSR_PP1_ENERGY_STATUS;
	case INTEL_RAPL_PSYS:
		bit = RAPL_IDX_PSYS_NRG_STAT;
		msr = MSR_PLATFORM_ENERGY_STATUS;

	/* check event supported */
	if (!(rapl_cntr_mask & (1 << bit)))

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */

	/* must be done before validate_group */
	pmu = cpu_to_rapl_pmu(event->cpu);
	event->cpu = pmu->cpu;
	event->pmu_private = pmu;
	event->hw.event_base = msr;
	event->hw.config = cfg;
static void rapl_pmu_event_read(struct perf_event *event)
	rapl_event_update(event);
static ssize_t rapl_get_attr_cpumask(struct device *dev,
				     struct device_attribute *attr, char *buf)
	return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);

static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);
static struct attribute *rapl_pmu_attrs[] = {
	&dev_attr_cpumask.attr,

static struct attribute_group rapl_pmu_attr_group = {
	.attrs = rapl_pmu_attrs,
RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
RAPL_EVENT_ATTR_STR(energy-pkg, rapl_pkg, "event=0x02");
RAPL_EVENT_ATTR_STR(energy-ram, rapl_ram, "event=0x03");
RAPL_EVENT_ATTR_STR(energy-gpu, rapl_gpu, "event=0x04");
RAPL_EVENT_ATTR_STR(energy-psys, rapl_psys, "event=0x05");

RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-pkg.unit, rapl_pkg_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-ram.unit, rapl_ram_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-gpu.unit, rapl_gpu_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-psys.unit, rapl_psys_unit, "Joules");
/*
 * we compute in 2^-32 Joule (~0.233 nJ) increments regardless of the MSR unit
 */
RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-psys.scale, rapl_psys_scale, "2.3283064365386962890625e-10");
static struct attribute *rapl_events_srv_attr[] = {
	EVENT_PTR(rapl_cores),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_ram_scale),
static struct attribute *rapl_events_cln_attr[] = {
	EVENT_PTR(rapl_cores),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
static struct attribute *rapl_events_hsw_attr[] = {
	EVENT_PTR(rapl_cores),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	EVENT_PTR(rapl_ram_scale),
static struct attribute *rapl_events_skl_attr[] = {
	EVENT_PTR(rapl_cores),

	EVENT_PTR(rapl_psys),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),
	EVENT_PTR(rapl_ram_unit),
	EVENT_PTR(rapl_psys_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	EVENT_PTR(rapl_ram_scale),
	EVENT_PTR(rapl_psys_scale),
static struct attribute *rapl_events_knl_attr[] = {

	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_ram_scale),
static struct attribute_group rapl_pmu_events_group = {
	.attrs = NULL, /* patched at runtime */
DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
static struct attribute *rapl_formats_attr[] = {
	&format_attr_event.attr,

static struct attribute_group rapl_pmu_format_group = {
	.attrs = rapl_formats_attr,
const struct attribute_group *rapl_attr_groups[] = {
	&rapl_pmu_attr_group,
	&rapl_pmu_format_group,
	&rapl_pmu_events_group,
static void rapl_cpu_exit(int cpu)
	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);

	/* Check if exiting cpu is used for collecting rapl events */
	if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))

	/* Find a new cpu to collect rapl events */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	/* Migrate rapl events to the new target */
	if (target < nr_cpu_ids) {
		cpumask_set_cpu(target, &rapl_cpu_mask);
		perf_pmu_migrate_context(pmu->pmu, cpu, target);
static void rapl_cpu_init(int cpu)
	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);

	/*
	 * Check if there is an online cpu in the package which collects rapl
	 * events already.
	 */
	target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
	if (target < nr_cpu_ids)

	cpumask_set_cpu(cpu, &rapl_cpu_mask);
static int rapl_cpu_prepare(int cpu)
	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);

	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));

	raw_spin_lock_init(&pmu->lock);
	INIT_LIST_HEAD(&pmu->active_list);
	pmu->pmu = &rapl_pmus->pmu;
	pmu->timer_interval = ms_to_ktime(rapl_timer_ms);

	rapl_hrtimer_init(pmu);
	rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
static int rapl_cpu_notifier(struct notifier_block *self,
			     unsigned long action, void *hcpu)
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
		rapl_cpu_prepare(cpu);
	case CPU_DOWN_FAILED:
	case CPU_DOWN_PREPARE:
static struct notifier_block rapl_cpu_nb = {
	.notifier_call	= rapl_cpu_notifier,
	.priority	= CPU_PRI_PERF + 1,
static int rapl_check_hw_unit(bool apply_quirk)
	u64 msr_rapl_power_unit_bits;

	/* protect rdmsrl() to handle virtualization */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
	for (i = 0; i < NR_RAPL_DOMAINS; i++)
		rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;

	/*
	 * The DRAM domain on HSW server and KNL has a fixed energy unit which
	 * can be different from the unit reported by the power unit MSR. See
	 * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
	 * of 2. Datasheet, September 2014, Reference Number: 330784-001"
	 */
		rapl_hw_unit[RAPL_IDX_RAM_NRG_STAT] = 16;

	/*
	 * Calculate the timer rate:
	 * Use a reference of 200W for scaling the timeout to avoid counter
	 * overflows. 200W = 200 Joules/sec.
	 * Divide the interval by 2 to avoid lockstep (2 * 100).
	 * If the hw unit is 32, then we use 2 ms (1/200/2).
	 */
	if (rapl_hw_unit[0] < 32) {
		rapl_timer_ms = (1000 / (2 * 100));
		rapl_timer_ms *= (1ULL << (32 - rapl_hw_unit[0] - 1));
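/*
 * Worked example for the timer calculation above (illustrative numbers,
 * not from the original source): with rapl_hw_unit[0] == 16, one counter
 * unit is 2^-16 J, so the 32-bit counter wraps after 2^32 * 2^-16 J =
 * 65536 J. At the 200 W reference that takes 65536 / 200 = ~328 s. The
 * code computes rapl_timer_ms = 5 * 2^(32 - 16 - 1) = 163840 ms, i.e.
 * roughly half the worst-case wrap time, so the hrtimer samples the MSR
 * well before the counter can overflow.
 */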
static void __init rapl_advertise(void)
	pr_info("API unit is 2^-32 Joules, %d fixed counters, %llu ms ovfl timer\n",
		hweight32(rapl_cntr_mask), rapl_timer_ms);

	for (i = 0; i < NR_RAPL_DOMAINS; i++) {
		if (rapl_cntr_mask & (1 << i)) {
			pr_info("hw unit of domain %s 2^-%d Joules\n",
				rapl_domain_names[i], rapl_hw_unit[i]);
static int __init rapl_prepare_cpus(void)
	unsigned int cpu, pkg;

	for_each_online_cpu(cpu) {
		pkg = topology_logical_package_id(cpu);
		if (rapl_pmus->pmus[pkg])

		ret = rapl_cpu_prepare(cpu);
static void cleanup_rapl_pmus(void)
	for (i = 0; i < rapl_pmus->maxpkg; i++)
		kfree(rapl_pmus->pmus[i]);
static int __init init_rapl_pmus(void)
	int maxpkg = topology_max_packages();

	size = sizeof(*rapl_pmus) + maxpkg * sizeof(struct rapl_pmu *);
	rapl_pmus = kzalloc(size, GFP_KERNEL);

	rapl_pmus->maxpkg		= maxpkg;
	rapl_pmus->pmu.attr_groups	= rapl_attr_groups;
	rapl_pmus->pmu.task_ctx_nr	= perf_invalid_context;
	rapl_pmus->pmu.event_init	= rapl_pmu_event_init;
	rapl_pmus->pmu.add		= rapl_pmu_event_add;
	rapl_pmus->pmu.del		= rapl_pmu_event_del;
	rapl_pmus->pmu.start		= rapl_pmu_event_start;
	rapl_pmus->pmu.stop		= rapl_pmu_event_stop;
	rapl_pmus->pmu.read		= rapl_pmu_event_read;
#define X86_RAPL_MODEL_MATCH(model, init)	\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_rapl_init_fun {
	struct attribute **attrs;
static const struct intel_rapl_init_fun snb_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_CLN,
	.attrs = rapl_events_cln_attr,

static const struct intel_rapl_init_fun hsx_rapl_init __initconst = {
	.cntr_mask = RAPL_IDX_SRV,
	.attrs = rapl_events_srv_attr,

static const struct intel_rapl_init_fun hsw_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_HSW,
	.attrs = rapl_events_hsw_attr,

static const struct intel_rapl_init_fun snbep_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_SRV,
	.attrs = rapl_events_srv_attr,

static const struct intel_rapl_init_fun knl_rapl_init __initconst = {
	.cntr_mask = RAPL_IDX_KNL,
	.attrs = rapl_events_knl_attr,

static const struct intel_rapl_init_fun skl_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_SKL_CLN,
	.attrs = rapl_events_skl_attr,
static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
	X86_RAPL_MODEL_MATCH(42, snb_rapl_init),	/* Sandy Bridge */
	X86_RAPL_MODEL_MATCH(45, snbep_rapl_init),	/* Sandy Bridge-EP */

	X86_RAPL_MODEL_MATCH(58, snb_rapl_init),	/* Ivy Bridge */
	X86_RAPL_MODEL_MATCH(62, snbep_rapl_init),	/* IvyTown */

	X86_RAPL_MODEL_MATCH(60, hsw_rapl_init),	/* Haswell */
	X86_RAPL_MODEL_MATCH(63, hsx_rapl_init),	/* Haswell-Server */
	X86_RAPL_MODEL_MATCH(69, hsw_rapl_init),	/* Haswell ULT */
	X86_RAPL_MODEL_MATCH(70, hsw_rapl_init),	/* Haswell GT3e */

	X86_RAPL_MODEL_MATCH(61, hsw_rapl_init),	/* Broadwell */
	X86_RAPL_MODEL_MATCH(71, hsw_rapl_init),	/* Broadwell-H */
	X86_RAPL_MODEL_MATCH(79, hsx_rapl_init),	/* Broadwell-Server */

	X86_RAPL_MODEL_MATCH(87, knl_rapl_init),	/* Knights Landing */

	X86_RAPL_MODEL_MATCH(78, skl_rapl_init),	/* Skylake */
	X86_RAPL_MODEL_MATCH(94, skl_rapl_init),	/* Skylake H/S */

MODULE_DEVICE_TABLE(x86cpu, rapl_cpu_match);
static int __init rapl_pmu_init(void)
	const struct x86_cpu_id *id;
	struct intel_rapl_init_fun *rapl_init;

	id = x86_match_cpu(rapl_cpu_match);

	rapl_init = (struct intel_rapl_init_fun *)id->driver_data;
	apply_quirk = rapl_init->apply_quirk;
	rapl_cntr_mask = rapl_init->cntr_mask;
	rapl_pmu_events_group.attrs = rapl_init->attrs;

	ret = rapl_check_hw_unit(apply_quirk);

	ret = init_rapl_pmus();

	cpu_notifier_register_begin();

	ret = rapl_prepare_cpus();

	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);

	__register_cpu_notifier(&rapl_cpu_nb);
	cpu_notifier_register_done();

	pr_warn("Initialization failed (%d), disabled\n", ret);
	cpu_notifier_register_done();

module_init(rapl_pmu_init);
static void __exit intel_rapl_exit(void)
	cpu_notifier_register_begin();
	__unregister_cpu_notifier(&rapl_cpu_nb);
	perf_pmu_unregister(&rapl_pmus->pmu);
	cpu_notifier_register_done();

module_exit(intel_rapl_exit);