/*
 * Copyright 2012 by Oracle Inc
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code borrows ideas from https://lkml.org/lkml/2011/11/30/249
 * so many thanks go to Kevin Tian <kevin.tian@intel.com>
 * and Yu Ke <ke.yu@intel.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>

#define DRV_NAME "xen-acpi-processor: "

static int no_hypercall;
MODULE_PARM_DESC(off, "Inhibit the hypercall.");
module_param_named(off, no_hypercall, int, 0400);
/*
 * Note: Do not convert the acpi_id* below to cpumask_var_t or use cpumask_bit
 * - as those shrink to nr_cpu_bits (which is dependent on possible_cpu), which
 * can be less than what we want to put in. Instead use the 'nr_acpi_bits'
 * which is dynamically computed based on the MADT or x2APIC table.
 */
static unsigned int nr_acpi_bits;
/* Mutex to protect the acpi_ids_done - for CPU hotplug use. */
static DEFINE_MUTEX(acpi_ids_mutex);
/* Which ACPI IDs we have processed from 'struct acpi_processor'. */
static unsigned long *acpi_ids_done;
/* Which ACPI IDs exist in the SSDT/DSDT processor definitions. */
static unsigned long __initdata *acpi_id_present;
/* And whether there is a _CST definition (or a PBLK) for the ACPI IDs. */
static unsigned long __initdata *acpi_id_cst_present;
static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
{
        struct xen_platform_op op = {
                .cmd = XENPF_set_processor_pminfo,
                .interface_version = XENPF_INTERFACE_VERSION,
                .u.set_pminfo.id = _pr->acpi_id,
                .u.set_pminfo.type = XEN_PM_CX,
        };
        struct xen_processor_cx *dst_cx, *dst_cx_states = NULL;
        struct acpi_processor_cx *cx;
        unsigned int i, ok;
        int ret = 0;
        dst_cx_states = kcalloc(_pr->power.count,
                                sizeof(struct xen_processor_cx), GFP_KERNEL);
        if (!dst_cx_states)
                return -ENOMEM;

        for (ok = 0, i = 1; i <= _pr->power.count; i++) {
                cx = &_pr->power.states[i];
                if (!cx->valid)
                        continue;

                dst_cx = &(dst_cx_states[ok++]);

                dst_cx->reg.space_id = ACPI_ADR_SPACE_SYSTEM_IO;
                if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
                        dst_cx->reg.bit_width = 8;
                        dst_cx->reg.bit_offset = 0;
                        dst_cx->reg.access_size = 1;
                } else {
                        dst_cx->reg.space_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
                        if (cx->entry_method == ACPI_CSTATE_FFH) {
                                /* NATIVE_CSTATE_BEYOND_HALT */
                                dst_cx->reg.bit_offset = 2;
                                dst_cx->reg.bit_width = 1; /* VENDOR_INTEL */
                        }
                        dst_cx->reg.access_size = 0;
                }
                dst_cx->reg.address = cx->address;

                dst_cx->type = cx->type;
                dst_cx->latency = cx->latency;
                dst_cx->power = cx->power;

                set_xen_guest_handle(dst_cx->dp, NULL);
        }
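        /* At this point dst_cx_states[0..ok-1] holds only the states we could
         * convert: SYSTEMIO entries become 8-bit I/O port accesses, while FFH
         * (MWAIT) entries are flagged as FIXED_HARDWARE with the Intel
         * bit_offset/bit_width encoding set up above. */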
        if (!ok) {
                pr_debug(DRV_NAME "No _Cx for ACPI CPU %u\n", _pr->acpi_id);
                kfree(dst_cx_states);
                return -EINVAL;
        }
        op.u.set_pminfo.power.count = ok;
        op.u.set_pminfo.power.flags.bm_control = _pr->flags.bm_control;
        op.u.set_pminfo.power.flags.bm_check = _pr->flags.bm_check;
        op.u.set_pminfo.power.flags.has_cst = _pr->flags.has_cst;
        op.u.set_pminfo.power.flags.power_setup_done =
                _pr->flags.power_setup_done;

        set_xen_guest_handle(op.u.set_pminfo.power.states, dst_cx_states);
        if (!no_hypercall)
                ret = HYPERVISOR_dom0_op(&op);

        if (!ret) {
                pr_debug("ACPI CPU%u - C-states uploaded.\n", _pr->acpi_id);
                for (i = 1; i <= _pr->power.count; i++) {
                        cx = &_pr->power.states[i];
                        if (!cx->valid)
                                continue;
                        pr_debug(" C%d: %s %d uS\n",
                                 cx->type, cx->desc, (u32)cx->latency);
                }
        } else
                pr_err(DRV_NAME "(CX): Hypervisor error (%d) for ACPI CPU%u\n",
                       ret, _pr->acpi_id);

        kfree(dst_cx_states);

        return ret;
}
static struct xen_processor_px *
xen_copy_pss_data(struct acpi_processor *_pr,
                  struct xen_processor_performance *dst_perf)
{
        struct xen_processor_px *dst_states = NULL;
        unsigned int i;

        BUILD_BUG_ON(sizeof(struct xen_processor_px) !=
                     sizeof(struct acpi_processor_px));
        dst_states = kcalloc(_pr->performance->state_count,
                             sizeof(struct xen_processor_px), GFP_KERNEL);
        if (!dst_states)
                return ERR_PTR(-ENOMEM);

        dst_perf->state_count = _pr->performance->state_count;
        for (i = 0; i < _pr->performance->state_count; i++) {
                /* Fortunately for us, they are both the same size. */
                memcpy(&(dst_states[i]), &(_pr->performance->states[i]),
                       sizeof(struct acpi_processor_px));
        }
        return dst_states;
}
static int xen_copy_psd_data(struct acpi_processor *_pr,
                             struct xen_processor_performance *dst)
{
        struct acpi_psd_package *pdomain;

        BUILD_BUG_ON(sizeof(struct xen_psd_package) !=
                     sizeof(struct acpi_psd_package));
        /* This information is enumerated only if
         * acpi_processor_preregister_performance() has been called.
         */
        dst->shared_type = _pr->performance->shared_type;

        pdomain = &(_pr->performance->domain_info);
        /* 'acpi_processor_preregister_performance' does not parse if the
         * num_processors <= 1, but Xen still requires it. Do it manually here.
         */
        if (pdomain->num_processors <= 1) {
                if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
                        dst->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
                        dst->shared_type = CPUFREQ_SHARED_TYPE_HW;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
                        dst->shared_type = CPUFREQ_SHARED_TYPE_ANY;
        }
        memcpy(&(dst->domain_info), pdomain, sizeof(struct acpi_psd_package));

        return 0;
}
static int xen_copy_pct_data(struct acpi_pct_register *pct,
                             struct xen_pct_register *dst_pct)
{
        /* It would be nice if you could just do 'memcpy(dst_pct, pct)', but
         * sadly the Xen structure did not have the proper padding, so the
         * descriptor field takes two bytes (dst_pct) instead of one (pct).
         */
        dst_pct->descriptor = pct->descriptor;
        dst_pct->length = pct->length;
        dst_pct->space_id = pct->space_id;
        dst_pct->bit_width = pct->bit_width;
        dst_pct->bit_offset = pct->bit_offset;
        dst_pct->reserved = pct->reserved;
        dst_pct->address = pct->address;

        return 0;
}
static int push_pxx_to_hypervisor(struct acpi_processor *_pr)
{
        int ret = 0;
        struct xen_platform_op op = {
                .cmd = XENPF_set_processor_pminfo,
                .interface_version = XENPF_INTERFACE_VERSION,
                .u.set_pminfo.id = _pr->acpi_id,
                .u.set_pminfo.type = XEN_PM_PX,
        };
        struct xen_processor_performance *dst_perf;
        struct xen_processor_px *dst_states = NULL;

        dst_perf = &op.u.set_pminfo.perf;
        dst_perf->platform_limit = _pr->performance_platform_limit;
        dst_perf->flags |= XEN_PX_PPC;
        xen_copy_pct_data(&(_pr->performance->control_register),
                          &dst_perf->control_register);
        xen_copy_pct_data(&(_pr->performance->status_register),
                          &dst_perf->status_register);
        dst_perf->flags |= XEN_PX_PCT;
        dst_states = xen_copy_pss_data(_pr, dst_perf);
        if (!IS_ERR_OR_NULL(dst_states)) {
                set_xen_guest_handle(dst_perf->states, dst_states);
                dst_perf->flags |= XEN_PX_PSS;
        }
        if (!xen_copy_psd_data(_pr, dst_perf))
                dst_perf->flags |= XEN_PX_PSD;
        if (dst_perf->flags != (XEN_PX_PSD | XEN_PX_PSS | XEN_PX_PCT | XEN_PX_PPC)) {
                pr_warn(DRV_NAME "ACPI CPU%u missing some P-state data (%x), skipping.\n",
                        _pr->acpi_id, dst_perf->flags);
                ret = -ENODEV;
                goto err_free;
        }
        if (!no_hypercall)
                ret = HYPERVISOR_dom0_op(&op);

        if (!ret) {
                struct acpi_processor_performance *perf;
                unsigned int i;

                perf = _pr->performance;
                pr_debug("ACPI CPU%u - P-states uploaded.\n", _pr->acpi_id);
                for (i = 0; i < perf->state_count; i++) {
                        pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n",
                                 (i == perf->state ? '*' : ' '), i,
                                 (u32) perf->states[i].core_frequency,
                                 (u32) perf->states[i].power,
                                 (u32) perf->states[i].transition_latency);
                }
        } else if (ret != -EINVAL)
                /* EINVAL means the ACPI ID is incorrect - meaning the ACPI
                 * table is referencing a non-existing CPU - which can happen
                 * with broken ACPI tables. */
                pr_warn(DRV_NAME "(_PXX): Hypervisor error (%d) for ACPI CPU%u\n",
                        ret, _pr->acpi_id);
err_free:
        if (!IS_ERR_OR_NULL(dst_states))
                kfree(dst_states);

        return ret;
}
static int upload_pm_data(struct acpi_processor *_pr)
{
        int err = 0;

        mutex_lock(&acpi_ids_mutex);
        if (__test_and_set_bit(_pr->acpi_id, acpi_ids_done)) {
                mutex_unlock(&acpi_ids_mutex);
                return -EBUSY;
        }
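        /* The bit in acpi_ids_done doubles as an "already uploaded" marker,
         * so a CPU hotplug event or the check_acpi_ids() fixup pass cannot
         * push the same ACPI ID twice. */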
        if (_pr->flags.power)
                err = push_cxx_to_hypervisor(_pr);

        if (_pr->performance && _pr->performance->states)
                err |= push_pxx_to_hypervisor(_pr);

        mutex_unlock(&acpi_ids_mutex);
        return err;
}
static unsigned int __init get_max_acpi_id(void)
{
        struct xenpf_pcpuinfo *info;
        struct xen_platform_op op = {
                .cmd = XENPF_get_cpuinfo,
                .interface_version = XENPF_INTERFACE_VERSION,
        };
        int ret;
        unsigned int i, last_cpu, max_acpi_id = 0;

        info = &op.u.pcpu_info;
        info->xen_cpuid = 0;

        ret = HYPERVISOR_dom0_op(&op);
        if (ret)
                return NR_CPUS;

        /* The max_present is the same regardless of the xen_cpuid. */
        last_cpu = op.u.pcpu_info.max_present;
        for (i = 0; i <= last_cpu; i++) {
                info->xen_cpuid = i;
                ret = HYPERVISOR_dom0_op(&op);
                if (ret)
                        continue;
                max_acpi_id = max(info->acpi_id, max_acpi_id);
        }
        max_acpi_id *= 2; /* Slack for CPU hotplug support. */
        pr_debug(DRV_NAME "Max ACPI ID: %u\n", max_acpi_id);
        return max_acpi_id;
}
/*
 * The read_acpi_id and check_acpi_ids functions are there to support the
 * Xen oddity of virtual CPUs != physical CPUs in the initial domain.
 * The user can supply 'xen_max_vcpus=X' on the Xen hypervisor command line,
 * which will bound the number of CPUs the initial domain can see.
 * In general that is OK, except it plays havoc with any of the
 * for_each_[present|online]_cpu macros, which are bounded to the virtual
 * CPU count.
 */
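
/*
 * The overall flow is thus two-pass: xen_acpi_processor_init() uploads data
 * for every vCPU Linux knows about, and check_acpi_ids() then walks the ACPI
 * namespace and replays the upload (through a backup 'struct acpi_processor')
 * for any physical CPU the initial domain cannot see.
 */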
static acpi_status __init
read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
{
        u32 acpi_id;
        acpi_status status;
        acpi_object_type acpi_type;
        unsigned long long tmp;
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
        acpi_io_address pblk = 0;

        status = acpi_get_type(handle, &acpi_type);
        if (ACPI_FAILURE(status))
                return AE_OK;

        switch (acpi_type) {
        case ACPI_TYPE_PROCESSOR:
                status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
                if (ACPI_FAILURE(status))
                        return AE_OK;
                acpi_id = object.processor.proc_id;
                pblk = object.processor.pblk_address;
                break;
        case ACPI_TYPE_DEVICE:
                status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
                if (ACPI_FAILURE(status))
                        return AE_OK;
                acpi_id = tmp;
                break;
        default:
                return AE_OK;
        }
        /* There are more ACPI Processor objects than in x2APIC or MADT.
         * This can happen with incorrect ACPI SSDT declarations. */
        if (acpi_id > nr_acpi_bits) {
                pr_debug(DRV_NAME "We only have %u, trying to set %u\n",
                         nr_acpi_bits, acpi_id);
                return AE_OK;
        }
        /* OK, there is an ACPI Processor object. */
        __set_bit(acpi_id, acpi_id_present);

        pr_debug(DRV_NAME "ACPI CPU%u w/ PBLK:0x%lx\n", acpi_id,
                 (unsigned long)pblk);
        status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (!pblk)
                        return AE_OK;
        }
        /* .. and it has a C-state. */
        __set_bit(acpi_id, acpi_id_cst_present);

        return AE_OK;
}
static int __init check_acpi_ids(struct acpi_processor *pr_backup)
{
        if (!pr_backup)
                return -ENODEV;

        /* All online CPUs have been processed at this stage. Now verify
         * whether in fact "online CPUs" == physical CPUs.
         */
        acpi_id_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits),
                                  sizeof(unsigned long), GFP_KERNEL);
        if (!acpi_id_present)
                return -ENOMEM;

        acpi_id_cst_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits),
                                      sizeof(unsigned long), GFP_KERNEL);
        if (!acpi_id_cst_present) {
                kfree(acpi_id_present);
                return -ENOMEM;
        }
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            read_acpi_id, NULL, NULL, NULL);
        acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL);
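
        /* Processors may be declared either as ACPI Processor objects or, on
         * newer firmware, as Device objects with the "ACPI0007" _HID; scan
         * for both so no ACPI ID is missed. */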
        if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) {
                unsigned int i;

                for_each_set_bit(i, acpi_id_present, nr_acpi_bits) {
                        pr_backup->acpi_id = i;
                        /* Mask out C-states if there are no _CST or PBLK. */
                        pr_backup->flags.power = test_bit(i, acpi_id_cst_present);
                        (void)upload_pm_data(pr_backup);
                }
        }
        kfree(acpi_id_present);
        acpi_id_present = NULL;
        kfree(acpi_id_cst_present);
        acpi_id_cst_present = NULL;
        return 0;
}
static int __init check_prereq(void)
{
        struct cpuinfo_x86 *c = &cpu_data(0);

        if (!xen_initial_domain())
                return -ENODEV;

        if (!acpi_gbl_FADT.smi_command)
                return -ENODEV;

        if (c->x86_vendor == X86_VENDOR_INTEL) {
                if (!cpu_has(c, X86_FEATURE_EST))
                        return -ENODEV;

                return 0;
        }
        if (c->x86_vendor == X86_VENDOR_AMD) {
                /* Copied from powernow-k8.h, can't include ../cpufreq/powernow
                 * as we get compile warnings for the static functions.
                 */
#define CPUID_FREQ_VOLT_CAPABILITIES    0x80000007
#define USE_HW_PSTATE                   0x00000080
                u32 eax, ebx, ecx, edx;

                cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
                if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
                        return -ENODEV;

                return 0;
        }
        return -ENODEV;
}
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;
static void free_acpi_perf_data(void)
{
        unsigned int i;

        /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
        for_each_possible_cpu(i)
                free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
                                 ->shared_cpu_map);
        free_percpu(acpi_perf_data);
}
static int __init xen_acpi_processor_init(void)
{
        struct acpi_processor *pr_backup = NULL;
        unsigned int i;
        int rc = check_prereq();

        if (rc)
                return rc;

        nr_acpi_bits = get_max_acpi_id() + 1;
        acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits),
                                sizeof(unsigned long), GFP_KERNEL);
        if (!acpi_ids_done)
                return -ENOMEM;

        acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
        if (!acpi_perf_data) {
                pr_debug(DRV_NAME "Memory allocation error for acpi_perf_data.\n");
                kfree(acpi_ids_done);
                return -ENOMEM;
        }
        for_each_possible_cpu(i) {
                if (!zalloc_cpumask_var_node(
                        &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
                        GFP_KERNEL, cpu_to_node(i))) {
                        rc = -ENOMEM;
                        goto err_free;
                }
        }

        /* Do initialization in ACPI core. It is OK to fail here. */
        (void)acpi_processor_preregister_performance(acpi_perf_data);
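
        /* The preregister pass is what fills in the _PSD domain data
         * (shared_type, domain_info) that xen_copy_psd_data() forwards
         * to the hypervisor later on. */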
        for_each_possible_cpu(i) {
                struct acpi_processor_performance *perf;

                perf = per_cpu_ptr(acpi_perf_data, i);
                rc = acpi_processor_register_performance(perf, i);
                if (rc)
                        goto err_free;
        }

        rc = acpi_processor_notify_smm(THIS_MODULE);
        if (rc)
                goto err_unregister;

        for_each_possible_cpu(i) {
                struct acpi_processor *_pr;

                _pr = per_cpu(processors, i /* APIC ID */);
                if (!_pr)
                        continue;

                if (!pr_backup) {
                        pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
                        if (pr_backup)
                                memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
                }
                (void)upload_pm_data(_pr);
        }
        rc = check_acpi_ids(pr_backup);
        if (rc)
                goto err_unregister;

        kfree(pr_backup);

        return 0;

err_unregister:
        for_each_possible_cpu(i) {
                struct acpi_processor_performance *perf;

                perf = per_cpu_ptr(acpi_perf_data, i);
                acpi_processor_unregister_performance(perf, i);
        }
err_free:
        /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
        free_acpi_perf_data();
        kfree(acpi_ids_done);
        return rc;
}
static void __exit xen_acpi_processor_exit(void)
{
        unsigned int i;

        kfree(acpi_ids_done);
        for_each_possible_cpu(i) {
                struct acpi_processor_performance *perf;

                perf = per_cpu_ptr(acpi_perf_data, i);
                acpi_processor_unregister_performance(perf, i);
        }
        free_acpi_perf_data();
}
MODULE_AUTHOR("Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>");
MODULE_DESCRIPTION("Xen ACPI Processor P-states (and Cx) driver which uploads PM data to Xen hypervisor");
MODULE_LICENSE("GPL");
/* We want to be loaded before the CPU freq scaling drivers are loaded.
 * They are loaded in late_initcall. */
device_initcall(xen_acpi_processor_init);
module_exit(xen_acpi_processor_exit);