/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#define BYT_RATIOS		0x66a
#define BYT_VIDS		0x66b
#define BYT_TURBO_RATIOS	0x66c
#define BYT_TURBO_VIDS		0x66d

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}
static inline int32_t div_fp(int32_t x, int32_t y)
{
	return div_s64((int64_t)x << FRAC_BITS, y);
}
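/*
 * Worked example of the 8.8 fixed-point helpers above: with FRAC_BITS = 8,
 * int_tofp(3) == 768 and int_tofp(2) == 512, so mul_fp(768, 512) ==
 * (768 * 512) >> 8 == 1536 == int_tofp(6), while div_fp(768, 512) ==
 * (768 << 8) / 512 == 384, i.e. 1.5 in fixed point.
 */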
struct pstate_data {
	int current_pstate;
	int min_pstate;
	int max_pstate;
	int turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	int freq;
	ktime_t time;
};

struct cpudata {
	int cpu;

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	ktime_t last_sample_time;
	u64 prev_aperf;
	u64 prev_mperf;
	struct sample sample;
};
static struct cpudata **all_cpu_data;
struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	void (*set)(struct cpudata *, int pstate);
	void (*get_vid)(struct cpudata *);
};
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};
static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
};

static struct perf_limits limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
};
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}
static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}
static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}
static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/* limit the integral term */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));

	return (signed int)fp_toint(result);
}
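/*
 * pid_calc() is a standard discrete PID step in 8.8 fixed point:
 *
 *	output = Kp*e + Ki*sum(e) + Kd*(e - e_prev),  where e = setpoint - busy
 *
 * The integral is clamped to +/-30 to bound windup, and adding
 * (1 << (FRAC_BITS - 1)) before fp_toint() rounds the result to the
 * nearest integer rather than flooring it.
 */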
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}
static inline void intel_pstate_reset_all_pid(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}
static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};
static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}
/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits.object);		\
	}
static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.no_turbo = clamp_t(int, input, 0, 1);
	if (limits.turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		limits.no_turbo = limits.turbo_disabled;
	}

	return count;
}
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	return count;
}
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.min_perf_pct = clamp_t(int, input, 0, 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	return count;
}
show_one(no_turbo, no_turbo);
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};
static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/
static int byt_get_min_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 8) & 0x7F;
}
static int byt_get_max_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 16) & 0x7F;
}
static int byt_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(BYT_TURBO_RATIOS, value);
	return value & 0x7F;
}
static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = fp_toint(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}
static void byt_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(BYT_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			 cpudata->pstate.min_pstate));

	rdmsrl(BYT_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}
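/*
 * vid.ratio is the slope (vid.max - vid.min) / (max_pstate - min_pstate)
 * in fixed point, so byt_set_pstate() can linearly interpolate a voltage
 * ID for any P state in the guaranteed range; requests above max_pstate
 * use the dedicated turbo VID read from BYT_TURBO_VIDS instead.
 */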
static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}
static int core_get_max_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}
static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = value & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}
static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}
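/*
 * In IA32_PERF_CTL the target ratio lives in bits 15:8 (hence pstate << 8
 * above), and bit 32 is the IDA/turbo disengage bit, which is why a
 * "no turbo" request sets (u64)1 << 32.
 */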
static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.set = core_set_pstate,
	},
};
static struct cpu_defaults byt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = byt_get_max_pstate,
		.get_min = byt_get_min_pstate,
		.get_turbo = byt_get_turbo_pstate,
		.set = byt_set_pstate,
		.get_vid = byt_get_vid,
	},
};
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits.no_turbo || limits.turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
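/*
 * Example: with min_pstate = 8 and turbo_pstate = 32, a sysfs
 * max_perf_pct of 50 (limits.max_perf == int_tofp(1) / 2) gives
 * max_perf_adj = fp_toint(int_tofp(32) * 0.5) == 16, so *max comes out
 * as 16 after clamping to [8, 32].
 */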
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

	pstate = clamp_t(int, pstate, min_perf, max_perf);

	if (pstate == cpu->pstate.current_pstate)
		return;

	trace_cpu_frequency(pstate * 100000, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->freq = fp_toint(
		mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));

	sample->core_pct_busy = (int32_t)core_pct;
}
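/*
 * APERF and MPERF both count only in C0: APERF at the actual clock and
 * MPERF at the guaranteed (max non-turbo) clock. Their ratio times 100
 * is therefore delivered performance as a percent of max_pstate; e.g.
 * an APERF delta of 18M against an MPERF delta of 12M yields 150 (in
 * fixed point), meaning the core was running in the turbo range.
 */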
static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;
	unsigned long flags;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = ktime_get();
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;

	intel_pstate_calc_busy(cpu);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
}
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(pid_params.sample_rate_ms);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	u32 duration_us;
	u32 sample_time;

	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
	duration_us = (u32) ktime_us_delta(cpu->sample.time,
					   cpu->last_sample_time);
	if (duration_us > sample_time * 3) {
		sample_ratio = div_fp(int_tofp(sample_time),
				      int_tofp(duration_us));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	return core_busy;
}
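/*
 * Scaling by max_pstate/current_pstate re-expresses busyness relative to
 * the current P state, which keeps the PID setpoint meaningful at every
 * operating point. When the deferrable timer fired much later than
 * requested (more than 3x the sample time, i.e. the core was mostly
 * idle), the value is further scaled down by sample_time/duration_us so
 * long idle stretches read as low busyness.
 */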
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, busy_scaled);

	/* Negative values of ctl increase the pstate and vice versa */
	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
}
static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;
	struct sample *sample;

	intel_pstate_sample(cpu);

	sample = &cpu->sample;

	intel_pstate_adjust_busy_pstate(cpu);

	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			fp_toint(intel_pstate_get_scaled_busy(cpu)),
			cpu->pstate.current_pstate,
			sample->mperf,
			sample->aperf,
			sample->freq);

	intel_pstate_set_sample_time(cpu);
}
#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, byt_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x4c, byt_params),
	ICPU(0x4f, core_params),
	ICPU(0x56, core_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
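/*
 * For reference, the family 6 model numbers above correspond to
 * Sandy Bridge (0x2a/0x2d), Bay Trail (0x37), Ivy Bridge (0x3a/0x3e),
 * Haswell (0x3c/0x3f/0x45/0x46), Broadwell (0x3d/0x4f/0x56) and
 * Airmont (0x4c) parts.
 */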
static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;
	intel_pstate_get_cpu_pstates(cpu);

	init_timer_deferrable(&cpu->timer);
	cpu->timer.function = intel_pstate_timer_func;
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;
	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);

	add_timer_on(&cpu->timer, cpunum);

	pr_debug("Intel pstate controlling: cpu %d\n", cpunum);

	return 0;
}
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return sample->freq;
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		limits.min_perf_pct = 100;
		limits.min_perf = int_tofp(1);
		limits.max_perf_pct = 100;
		limits.max_perf = int_tofp(1);
		limits.no_turbo = limits.turbo_disabled;
		return 0;
	}

	limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0, 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	return 0;
}
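/*
 * Example: policy->max = 1600000 kHz on a CPU whose cpuinfo.max_freq is
 * 3200000 kHz yields max_policy_pct = 50; the effective max_perf_pct is
 * then the smaller of that and whatever was written via sysfs.
 */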
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}
static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_info("intel_pstate CPU %d exiting\n", cpu_num);

	del_timer_sync(&all_cpu_data[cpu_num]->timer);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
	kfree(all_cpu_data[cpu_num]);
	all_cpu_data[cpu_num] = NULL;
}
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;
	u64 misc_en;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
	    cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
		limits.turbo_disabled = 1;
		limits.no_turbo = 1;
	}
	if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * 100000;
	policy->max = cpu->pstate.turbo_pstate * 100000;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
	policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}
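/*
 * The * 100000 conversions assume each P state ratio step corresponds to
 * a 100 MHz bus clock, so ratio * 100000 is the frequency in kHz that
 * the cpufreq core expects.
 */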
static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int __initdata no_load;
static int intel_pstate_msrs_not_valid(void)
{
	/* Check that all the msr's we are using are valid. */
	u64 aperf, mperf, tmp;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);

	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	rdmsrl(MSR_IA32_APERF, tmp);
	if (!(tmp - aperf))
		return -ENODEV;

	rdmsrl(MSR_IA32_MPERF, tmp);
	if (!(tmp - mperf))
		return -ENODEV;

	return 0;
}
static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}
static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
}
#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>
static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}
struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
};
/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant"},
	{0, "", ""},
};
static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
			     ACPI_OEM_TABLE_ID_SIZE) &&
		    intel_pstate_no_acpi_pss())
			return true;
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
#endif /* CONFIG_ACPI */
static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_info;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_info = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_info->pid_policy);
	copy_cpu_funcs(&cpu_info->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);
MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
MODULE_LICENSE("GPL");