cpufreq: intel_pstate: Documentation for structures
/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

/**
 * struct sample -	Store performance sample
 * @core_pct_busy:	Ratio of APERF/MPERF in percent, which is actual
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different from core_pct_busy
 *			to account for cpu idle period
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @freq:		Effective frequency calculated from APERF/MPERF
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
	int32_t core_pct_busy;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	int freq;
	u64 time;
};

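/*
 * Illustrative sketch, not part of the driver: how one APERF/MPERF delta
 * pair maps to the fixed-point core_pct_busy value held in struct sample,
 * using the FRAC_BITS helpers defined above.  The helper name and the
 * values in the comments are made up for the example.
 */
static inline int32_t example_core_pct_busy(u64 aperf_delta, u64 mperf_delta)
{
	int64_t core_pct;

	/* core_pct = 100 * aperf / mperf, kept in FRAC_BITS fixed point */
	core_pct = int_tofp(aperf_delta) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(mperf_delta));

	/* e.g. aperf_delta = 900, mperf_delta = 1000 -> ~90% busy */
	return (int32_t)core_pct;
}
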
/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: This is physical Max P state for a processor
 *			This can be higher than the max_pstate which can
 *			be limited by platform thermal design power limits
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max Turbo P state possible for this platform
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	scaling;
	int	turbo_pstate;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used in Atom platforms, where in addition to target P state,
 * the voltage data needs to be specified to select next P State.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

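/*
 * Illustrative sketch, not part of the driver: vid.ratio above is a
 * fixed-point slope, so the VID for a given P state follows by linear
 * interpolation between vid.min and vid.max.  This mirrors the logic in
 * atom_get_val() further down; the helper name here is made up.
 */
static inline u32 example_vid_for_pstate(struct vid_data *vid, int pstate,
					 int min_pstate)
{
	int32_t vid_fp;

	vid_fp = vid->min + mul_fp(int_tofp(pstate - min_pstate), vid->ratio);
	vid_fp = clamp_t(int32_t, vid_fp, vid->min, vid->max);
	return ceiling_fp(vid_fp);
}
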
/**
 * struct _pid -	Stores PID data
 * @setpoint:		Target set point for busyness or performance
 * @integral:		Storage for accumulated error values
 * @p_gain:		PID proportional gain
 * @i_gain:		PID integral gain
 * @d_gain:		PID derivative gain
 * @deadband:		PID deadband
 * @last_err:		Last error storage for integral part of PID calculation
 *
 * Stores PID coefficients and last error for PID controller.
 */
struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @update_util:	CPUFreq utility callback information
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @pid:		Stores PID parameters for this CPU
 * @last_sample_time:	Last Sample time
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO Wait time difference between last and
 *			current sample
 * @sample:		Storage for storing last Sample data
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
	int cpu;

	struct update_util_data update_util;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64 last_sample_time;
	u64 prev_aperf;
	u64 prev_mperf;
	u64 prev_tsc;
	u64 prev_cummulative_iowait;
	struct sample sample;
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_adjust_policy - Stores static PID configuration data
 * @sample_rate_ms:	PID calculation sample rate in ms
 * @sample_rate_ns:	Sample rate calculation in ns
 * @deadband:		PID deadband
 * @setpoint:		PID Setpoint
 * @p_gain_pct:		PID proportional gain
 * @i_gain_pct:		PID integral gain
 * @d_gain_pct:		PID derivative gain
 *
 * Stores per CPU model static PID configuration data.
 */
struct pstate_adjust_policy {
	int sample_rate_ms;
	s64 sample_rate_ns;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 * @get_target_pstate:	Callback to a function to calculate next P state to use
 *
 * Core and Atom CPU models have different ways to get P State limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	u64 (*get_val)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};

/**
 * struct cpu_defaults - Per CPU model default config data
 * @pid_policy:	PID config data
 * @funcs:	Callback function data
 */
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;

/**
 * struct perf_limits - Store user and policy limits
 * @no_turbo:		User requested turbo state from intel_pstate sysfs
 * @turbo_disabled:	Platform turbo status, either from the
 *			MSR_IA32_MISC_ENABLE MSR or set when the maximum
 *			available pstate matches the maximum turbo pstate
 * @max_perf_pct:	Effective maximum performance limit in percentage; this
 *			is the minimum of the limit enforced by the cpufreq
 *			policy and the user limit set via intel_pstate sysfs
 * @min_perf_pct:	Effective minimum performance limit in percentage; this
 *			is the maximum of the limit enforced by the cpufreq
 *			policy and the user limit set via intel_pstate sysfs
 * @max_perf:		This is a scaled value between 0 and 255 for max_perf_pct
 *			This value is used to limit max pstate
 * @min_perf:		This is a scaled value between 0 and 255 for min_perf_pct
 *			This value is used to limit min pstate
 * @max_policy_pct:	The maximum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @max_sysfs_pct:	The maximum performance in percentage enforced by
 *			intel pstate sysfs interface
 * @min_policy_pct:	The minimum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @min_sysfs_pct:	The minimum performance in percentage enforced by
 *			intel pstate sysfs interface
 *
 * Storage for user and policy defined limits.
 */
struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

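/*
 * Illustrative sketch, not part of the driver: how the percentage limits
 * above become the fixed-point min_perf/max_perf fields and then cap a
 * P state.  This mirrors store_max_perf_pct() and intel_pstate_get_min_max()
 * below; the helper name is made up.
 */
static inline int example_limit_pstate(const struct perf_limits *l,
				       int max_pstate)
{
	int32_t max_perf = div_fp(int_tofp(l->max_perf_pct), int_tofp(100));

	/* e.g. max_perf_pct = 50 and max_pstate = 30 -> limited to pstate 15 */
	return fp_toint(max_pstate * max_perf);
}
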
93f0822d 334static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
c410833a 335 int deadband, int integral) {
b54a0dfd
PL
336 pid->setpoint = int_tofp(setpoint);
337 pid->deadband = int_tofp(deadband);
93f0822d 338 pid->integral = int_tofp(integral);
d98d099b 339 pid->last_err = int_tofp(setpoint) - int_tofp(busy);
93f0822d
DB
340}
341
342static inline void pid_p_gain_set(struct _pid *pid, int percent)
343{
344 pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
345}
346
347static inline void pid_i_gain_set(struct _pid *pid, int percent)
348{
349 pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
350}
351
352static inline void pid_d_gain_set(struct _pid *pid, int percent)
353{
93f0822d
DB
354 pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
355}
356
d253d2a5 357static signed int pid_calc(struct _pid *pid, int32_t busy)
93f0822d 358{
d253d2a5 359 signed int result;
93f0822d
DB
360 int32_t pterm, dterm, fp_error;
361 int32_t integral_limit;
362
b54a0dfd 363 fp_error = pid->setpoint - busy;
93f0822d 364
b54a0dfd 365 if (abs(fp_error) <= pid->deadband)
93f0822d
DB
366 return 0;
367
368 pterm = mul_fp(pid->p_gain, fp_error);
369
370 pid->integral += fp_error;
371
e0d4c8f8
KCA
372 /*
373 * We limit the integral here so that it will never
374 * get higher than 30. This prevents it from becoming
375 * too large an input over long periods of time and allows
376 * it to get factored out sooner.
377 *
378 * The value of 30 was chosen through experimentation.
379 */
93f0822d
DB
380 integral_limit = int_tofp(30);
381 if (pid->integral > integral_limit)
382 pid->integral = integral_limit;
383 if (pid->integral < -integral_limit)
384 pid->integral = -integral_limit;
385
d253d2a5
BS
386 dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
387 pid->last_err = fp_error;
93f0822d
DB
388
389 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
51d211e9 390 result = result + (1 << (FRAC_BITS-1));
93f0822d
DB
391 return (signed int)fp_toint(result);
392}
393
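/*
 * Illustrative sketch, not part of the driver: one PID step using the
 * helpers above.  The gains, setpoint and busy value are hypothetical;
 * the driver seeds them from pid_params in intel_pstate_busy_pid_reset().
 */
static inline int example_pid_step(void)
{
	struct _pid pid;

	pid_p_gain_set(&pid, 20);		/* 20% proportional gain */
	pid_i_gain_set(&pid, 0);
	pid_d_gain_set(&pid, 0);
	pid_reset(&pid, 97, 100, 0, 0);		/* setpoint 97, deadband 0 */

	/*
	 * A busy value below the setpoint yields a positive result, which
	 * the caller subtracts from the current P state.
	 */
	return pid_calc(&pid, int_tofp(90));
}
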
394static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
395{
016c8150
DB
396 pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
397 pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
398 pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);
93f0822d 399
2d8d1f18 400 pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
93f0822d
DB
401}
402
93f0822d
DB
403static inline void intel_pstate_reset_all_pid(void)
404{
405 unsigned int cpu;
845c1cbe 406
93f0822d
DB
407 for_each_online_cpu(cpu) {
408 if (all_cpu_data[cpu])
409 intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
410 }
411}
412
4521e1a0
GM
413static inline void update_turbo_state(void)
414{
415 u64 misc_en;
416 struct cpudata *cpu;
417
418 cpu = all_cpu_data[0];
419 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
51443fbf 420 limits->turbo_disabled =
4521e1a0
GM
421 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
422 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
423}
424
41cfd64c 425static void intel_pstate_hwp_set(const struct cpumask *cpumask)
2f86dc4c 426{
74da56ce
KCA
427 int min, hw_min, max, hw_max, cpu, range, adj_range;
428 u64 value, cap;
429
430 rdmsrl(MSR_HWP_CAPABILITIES, cap);
431 hw_min = HWP_LOWEST_PERF(cap);
432 hw_max = HWP_HIGHEST_PERF(cap);
433 range = hw_max - hw_min;
2f86dc4c 434
41cfd64c 435 for_each_cpu(cpu, cpumask) {
2f86dc4c 436 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
51443fbf 437 adj_range = limits->min_perf_pct * range / 100;
74da56ce 438 min = hw_min + adj_range;
2f86dc4c
DB
439 value &= ~HWP_MIN_PERF(~0L);
440 value |= HWP_MIN_PERF(min);
441
51443fbf 442 adj_range = limits->max_perf_pct * range / 100;
74da56ce 443 max = hw_min + adj_range;
51443fbf 444 if (limits->no_turbo) {
74da56ce
KCA
445 hw_max = HWP_GUARANTEED_PERF(cap);
446 if (hw_max < max)
447 max = hw_max;
2f86dc4c
DB
448 }
449
450 value &= ~HWP_MAX_PERF(~0L);
451 value |= HWP_MAX_PERF(max);
452 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
453 }
41cfd64c 454}
2f86dc4c 455
41cfd64c
VK
456static void intel_pstate_hwp_set_online_cpus(void)
457{
458 get_online_cpus();
459 intel_pstate_hwp_set(cpu_online_mask);
2f86dc4c
DB
460 put_online_cpus();
461}
462
93f0822d
DB
463/************************** debugfs begin ************************/
464static int pid_param_set(void *data, u64 val)
465{
466 *(u32 *)data = val;
467 intel_pstate_reset_all_pid();
468 return 0;
469}
845c1cbe 470
93f0822d
DB
471static int pid_param_get(void *data, u64 *val)
472{
473 *val = *(u32 *)data;
474 return 0;
475}
2d8d1f18 476DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
93f0822d
DB
477
478struct pid_param {
479 char *name;
480 void *value;
481};
482
483static struct pid_param pid_files[] = {
016c8150
DB
484 {"sample_rate_ms", &pid_params.sample_rate_ms},
485 {"d_gain_pct", &pid_params.d_gain_pct},
486 {"i_gain_pct", &pid_params.i_gain_pct},
487 {"deadband", &pid_params.deadband},
488 {"setpoint", &pid_params.setpoint},
489 {"p_gain_pct", &pid_params.p_gain_pct},
93f0822d
DB
490 {NULL, NULL}
491};
492
317dd50e 493static void __init intel_pstate_debug_expose_params(void)
93f0822d 494{
317dd50e 495 struct dentry *debugfs_parent;
93f0822d
DB
496 int i = 0;
497
2f86dc4c
DB
498 if (hwp_active)
499 return;
93f0822d
DB
500 debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
501 if (IS_ERR_OR_NULL(debugfs_parent))
502 return;
503 while (pid_files[i].name) {
504 debugfs_create_file(pid_files[i].name, 0660,
c410833a
SK
505 debugfs_parent, pid_files[i].value,
506 &fops_pid_param);
93f0822d
DB
507 i++;
508 }
509}
510
511/************************** debugfs end ************************/
512
513/************************** sysfs begin ************************/
514#define show_one(file_name, object) \
515 static ssize_t show_##file_name \
516 (struct kobject *kobj, struct attribute *attr, char *buf) \
517 { \
51443fbf 518 return sprintf(buf, "%u\n", limits->object); \
93f0822d
DB
519 }
520
d01b1f48
KCA
521static ssize_t show_turbo_pct(struct kobject *kobj,
522 struct attribute *attr, char *buf)
523{
524 struct cpudata *cpu;
525 int total, no_turbo, turbo_pct;
526 uint32_t turbo_fp;
527
528 cpu = all_cpu_data[0];
529
530 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
531 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
532 turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
533 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
534 return sprintf(buf, "%u\n", turbo_pct);
535}
536
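/*
 * Worked example for show_turbo_pct() with hypothetical values: with
 * min_pstate = 6, max_pstate = 25 and turbo_pstate = 30 there are 25 states
 * in total, 20 of them non-turbo, so turbo_pct reads about 20 (fixed-point
 * truncation can shift the result by one).
 */
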
0522424e
KCA
537static ssize_t show_num_pstates(struct kobject *kobj,
538 struct attribute *attr, char *buf)
539{
540 struct cpudata *cpu;
541 int total;
542
543 cpu = all_cpu_data[0];
544 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
545 return sprintf(buf, "%u\n", total);
546}
547
4521e1a0
GM
548static ssize_t show_no_turbo(struct kobject *kobj,
549 struct attribute *attr, char *buf)
550{
551 ssize_t ret;
552
553 update_turbo_state();
51443fbf
PB
554 if (limits->turbo_disabled)
555 ret = sprintf(buf, "%u\n", limits->turbo_disabled);
4521e1a0 556 else
51443fbf 557 ret = sprintf(buf, "%u\n", limits->no_turbo);
4521e1a0
GM
558
559 return ret;
560}
561
93f0822d 562static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
c410833a 563 const char *buf, size_t count)
93f0822d
DB
564{
565 unsigned int input;
566 int ret;
845c1cbe 567
93f0822d
DB
568 ret = sscanf(buf, "%u", &input);
569 if (ret != 1)
570 return -EINVAL;
4521e1a0
GM
571
572 update_turbo_state();
51443fbf 573 if (limits->turbo_disabled) {
f16255eb 574 pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
4521e1a0 575 return -EPERM;
dd5fbf70 576 }
2f86dc4c 577
51443fbf 578 limits->no_turbo = clamp_t(int, input, 0, 1);
4521e1a0 579
2f86dc4c 580 if (hwp_active)
41cfd64c 581 intel_pstate_hwp_set_online_cpus();
2f86dc4c 582
93f0822d
DB
583 return count;
584}
585
586static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
c410833a 587 const char *buf, size_t count)
93f0822d
DB
588{
589 unsigned int input;
590 int ret;
845c1cbe 591
93f0822d
DB
592 ret = sscanf(buf, "%u", &input);
593 if (ret != 1)
594 return -EINVAL;
595
51443fbf
PB
596 limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
597 limits->max_perf_pct = min(limits->max_policy_pct,
598 limits->max_sysfs_pct);
599 limits->max_perf_pct = max(limits->min_policy_pct,
600 limits->max_perf_pct);
601 limits->max_perf_pct = max(limits->min_perf_pct,
602 limits->max_perf_pct);
603 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
604 int_tofp(100));
845c1cbe 605
2f86dc4c 606 if (hwp_active)
41cfd64c 607 intel_pstate_hwp_set_online_cpus();
93f0822d
DB
608 return count;
609}
610
611static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
c410833a 612 const char *buf, size_t count)
93f0822d
DB
613{
614 unsigned int input;
615 int ret;
845c1cbe 616
93f0822d
DB
617 ret = sscanf(buf, "%u", &input);
618 if (ret != 1)
619 return -EINVAL;
a0475992 620
51443fbf
PB
621 limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
622 limits->min_perf_pct = max(limits->min_policy_pct,
623 limits->min_sysfs_pct);
624 limits->min_perf_pct = min(limits->max_policy_pct,
625 limits->min_perf_pct);
626 limits->min_perf_pct = min(limits->max_perf_pct,
627 limits->min_perf_pct);
628 limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
629 int_tofp(100));
93f0822d 630
2f86dc4c 631 if (hwp_active)
41cfd64c 632 intel_pstate_hwp_set_online_cpus();
93f0822d
DB
633 return count;
634}
635
93f0822d
DB
636show_one(max_perf_pct, max_perf_pct);
637show_one(min_perf_pct, min_perf_pct);
638
639define_one_global_rw(no_turbo);
640define_one_global_rw(max_perf_pct);
641define_one_global_rw(min_perf_pct);
d01b1f48 642define_one_global_ro(turbo_pct);
0522424e 643define_one_global_ro(num_pstates);
93f0822d
DB
644
645static struct attribute *intel_pstate_attributes[] = {
646 &no_turbo.attr,
647 &max_perf_pct.attr,
648 &min_perf_pct.attr,
d01b1f48 649 &turbo_pct.attr,
0522424e 650 &num_pstates.attr,
93f0822d
DB
651 NULL
652};
653
654static struct attribute_group intel_pstate_attr_group = {
655 .attrs = intel_pstate_attributes,
656};
93f0822d 657
317dd50e 658static void __init intel_pstate_sysfs_expose_params(void)
93f0822d 659{
317dd50e 660 struct kobject *intel_pstate_kobject;
93f0822d
DB
661 int rc;
662
663 intel_pstate_kobject = kobject_create_and_add("intel_pstate",
664 &cpu_subsys.dev_root->kobj);
665 BUG_ON(!intel_pstate_kobject);
2d8d1f18 666 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
93f0822d
DB
667 BUG_ON(rc);
668}
93f0822d 669/************************** sysfs end ************************/
2f86dc4c 670
ba88d433 671static void intel_pstate_hwp_enable(struct cpudata *cpudata)
2f86dc4c 672{
f05c9665
SP
673 /* First disable HWP notification interrupt as we don't process them */
674 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
675
ba88d433 676 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
2f86dc4c
DB
677}
678
938d21a2 679static int atom_get_min_pstate(void)
19e77c28
DB
680{
681 u64 value;
845c1cbe 682
938d21a2 683 rdmsrl(ATOM_RATIOS, value);
c16ed060 684 return (value >> 8) & 0x7F;
19e77c28
DB
685}
686
938d21a2 687static int atom_get_max_pstate(void)
19e77c28
DB
688{
689 u64 value;
845c1cbe 690
938d21a2 691 rdmsrl(ATOM_RATIOS, value);
c16ed060 692 return (value >> 16) & 0x7F;
19e77c28 693}
93f0822d 694
938d21a2 695static int atom_get_turbo_pstate(void)
61d8d2ab
DB
696{
697 u64 value;
845c1cbe 698
938d21a2 699 rdmsrl(ATOM_TURBO_RATIOS, value);
c16ed060 700 return value & 0x7F;
61d8d2ab
DB
701}
702
fdfdb2b1 703static u64 atom_get_val(struct cpudata *cpudata, int pstate)
007bea09
DB
704{
705 u64 val;
706 int32_t vid_fp;
707 u32 vid;
708
144c8e17 709 val = (u64)pstate << 8;
51443fbf 710 if (limits->no_turbo && !limits->turbo_disabled)
007bea09
DB
711 val |= (u64)1 << 32;
712
713 vid_fp = cpudata->vid.min + mul_fp(
714 int_tofp(pstate - cpudata->pstate.min_pstate),
715 cpudata->vid.ratio);
716
717 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
d022a65e 718 vid = ceiling_fp(vid_fp);
007bea09 719
21855ff5
DB
720 if (pstate > cpudata->pstate.max_pstate)
721 vid = cpudata->vid.turbo;
722
fdfdb2b1 723 return val | vid;
007bea09
DB
724}
725
1421df63 726static int silvermont_get_scaling(void)
b27580b0
DB
727{
728 u64 value;
729 int i;
1421df63
PL
730 /* Defined in Table 35-6 from SDM (Sept 2015) */
731 static int silvermont_freq_table[] = {
732 83300, 100000, 133300, 116700, 80000};
b27580b0
DB
733
734 rdmsrl(MSR_FSB_FREQ, value);
1421df63
PL
735 i = value & 0x7;
736 WARN_ON(i > 4);
b27580b0 737
1421df63
PL
738 return silvermont_freq_table[i];
739}
b27580b0 740
1421df63
PL
741static int airmont_get_scaling(void)
742{
743 u64 value;
744 int i;
745 /* Defined in Table 35-10 from SDM (Sept 2015) */
746 static int airmont_freq_table[] = {
747 83300, 100000, 133300, 116700, 80000,
748 93300, 90000, 88900, 87500};
749
750 rdmsrl(MSR_FSB_FREQ, value);
751 i = value & 0xF;
752 WARN_ON(i > 8);
753
754 return airmont_freq_table[i];
b27580b0
DB
755}
756
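/*
 * Illustrative sketch, not part of the driver: the scaling factor returned
 * by the helpers above is the bus clock in kHz, so an effective frequency
 * is simply pstate * scaling.  The helper name below is made up.
 */
static inline unsigned int example_pstate_to_khz(int pstate, int scaling)
{
	return pstate * scaling;	/* e.g. 20 * 100000 -> 2000000 kHz */
}
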
938d21a2 757static void atom_get_vid(struct cpudata *cpudata)
007bea09
DB
758{
759 u64 value;
760
938d21a2 761 rdmsrl(ATOM_VIDS, value);
c16ed060
DB
762 cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
763 cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
007bea09
DB
764 cpudata->vid.ratio = div_fp(
765 cpudata->vid.max - cpudata->vid.min,
766 int_tofp(cpudata->pstate.max_pstate -
767 cpudata->pstate.min_pstate));
21855ff5 768
938d21a2 769 rdmsrl(ATOM_TURBO_VIDS, value);
21855ff5 770 cpudata->vid.turbo = value & 0x7f;
007bea09
DB
771}
772
016c8150 773static int core_get_min_pstate(void)
93f0822d
DB
774{
775 u64 value;
845c1cbe 776
05e99c8c 777 rdmsrl(MSR_PLATFORM_INFO, value);
93f0822d
DB
778 return (value >> 40) & 0xFF;
779}
780
3bcc6fa9 781static int core_get_max_pstate_physical(void)
93f0822d
DB
782{
783 u64 value;
845c1cbe 784
05e99c8c 785 rdmsrl(MSR_PLATFORM_INFO, value);
93f0822d
DB
786 return (value >> 8) & 0xFF;
787}
788
016c8150 789static int core_get_max_pstate(void)
93f0822d 790{
6a35fc2d
SP
791 u64 tar;
792 u64 plat_info;
793 int max_pstate;
794 int err;
795
796 rdmsrl(MSR_PLATFORM_INFO, plat_info);
797 max_pstate = (plat_info >> 8) & 0xFF;
798
799 err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
800 if (!err) {
801 /* Do some sanity checking for safety */
802 if (plat_info & 0x600000000) {
803 u64 tdp_ctrl;
804 u64 tdp_ratio;
805 int tdp_msr;
806
807 err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
808 if (err)
809 goto skip_tar;
810
811 tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
812 err = rdmsrl_safe(tdp_msr, &tdp_ratio);
813 if (err)
814 goto skip_tar;
815
816 if (tdp_ratio - 1 == tar) {
817 max_pstate = tar;
818 pr_debug("max_pstate=TAC %x\n", max_pstate);
819 } else {
820 goto skip_tar;
821 }
822 }
823 }
845c1cbe 824
6a35fc2d
SP
825skip_tar:
826 return max_pstate;
93f0822d
DB
827}
828
016c8150 829static int core_get_turbo_pstate(void)
93f0822d
DB
830{
831 u64 value;
832 int nont, ret;
845c1cbe 833
05e99c8c 834 rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
016c8150 835 nont = core_get_max_pstate();
285cb990 836 ret = (value) & 255;
93f0822d
DB
837 if (ret <= nont)
838 ret = nont;
839 return ret;
840}
841
b27580b0
DB
842static inline int core_get_scaling(void)
843{
844 return 100000;
845}
846
fdfdb2b1 847static u64 core_get_val(struct cpudata *cpudata, int pstate)
016c8150
DB
848{
849 u64 val;
850
144c8e17 851 val = (u64)pstate << 8;
51443fbf 852 if (limits->no_turbo && !limits->turbo_disabled)
016c8150
DB
853 val |= (u64)1 << 32;
854
fdfdb2b1 855 return val;
016c8150
DB
856}
857
b34ef932
DC
858static int knl_get_turbo_pstate(void)
859{
860 u64 value;
861 int nont, ret;
862
863 rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
864 nont = core_get_max_pstate();
865 ret = (((value) >> 8) & 0xFF);
866 if (ret <= nont)
867 ret = nont;
868 return ret;
869}
870
016c8150
DB
871static struct cpu_defaults core_params = {
872 .pid_policy = {
873 .sample_rate_ms = 10,
874 .deadband = 0,
875 .setpoint = 97,
876 .p_gain_pct = 20,
877 .d_gain_pct = 0,
878 .i_gain_pct = 0,
879 },
880 .funcs = {
881 .get_max = core_get_max_pstate,
3bcc6fa9 882 .get_max_physical = core_get_max_pstate_physical,
016c8150
DB
883 .get_min = core_get_min_pstate,
884 .get_turbo = core_get_turbo_pstate,
b27580b0 885 .get_scaling = core_get_scaling,
fdfdb2b1 886 .get_val = core_get_val,
157386b6 887 .get_target_pstate = get_target_pstate_use_performance,
016c8150
DB
888 },
889};
890
1421df63
PL
891static struct cpu_defaults silvermont_params = {
892 .pid_policy = {
893 .sample_rate_ms = 10,
894 .deadband = 0,
895 .setpoint = 60,
896 .p_gain_pct = 14,
897 .d_gain_pct = 0,
898 .i_gain_pct = 4,
899 },
900 .funcs = {
901 .get_max = atom_get_max_pstate,
902 .get_max_physical = atom_get_max_pstate,
903 .get_min = atom_get_min_pstate,
904 .get_turbo = atom_get_turbo_pstate,
fdfdb2b1 905 .get_val = atom_get_val,
1421df63
PL
906 .get_scaling = silvermont_get_scaling,
907 .get_vid = atom_get_vid,
e70eed2b 908 .get_target_pstate = get_target_pstate_use_cpu_load,
1421df63
PL
909 },
910};
911
912static struct cpu_defaults airmont_params = {
19e77c28
DB
913 .pid_policy = {
914 .sample_rate_ms = 10,
915 .deadband = 0,
6a82ba6d 916 .setpoint = 60,
19e77c28
DB
917 .p_gain_pct = 14,
918 .d_gain_pct = 0,
919 .i_gain_pct = 4,
920 },
921 .funcs = {
938d21a2
PL
922 .get_max = atom_get_max_pstate,
923 .get_max_physical = atom_get_max_pstate,
924 .get_min = atom_get_min_pstate,
925 .get_turbo = atom_get_turbo_pstate,
fdfdb2b1 926 .get_val = atom_get_val,
1421df63 927 .get_scaling = airmont_get_scaling,
938d21a2 928 .get_vid = atom_get_vid,
e70eed2b 929 .get_target_pstate = get_target_pstate_use_cpu_load,
19e77c28
DB
930 },
931};
932
b34ef932
DC
933static struct cpu_defaults knl_params = {
934 .pid_policy = {
935 .sample_rate_ms = 10,
936 .deadband = 0,
937 .setpoint = 97,
938 .p_gain_pct = 20,
939 .d_gain_pct = 0,
940 .i_gain_pct = 0,
941 },
942 .funcs = {
943 .get_max = core_get_max_pstate,
3bcc6fa9 944 .get_max_physical = core_get_max_pstate_physical,
b34ef932
DC
945 .get_min = core_get_min_pstate,
946 .get_turbo = knl_get_turbo_pstate,
69cefc27 947 .get_scaling = core_get_scaling,
fdfdb2b1 948 .get_val = core_get_val,
157386b6 949 .get_target_pstate = get_target_pstate_use_performance,
b34ef932
DC
950 },
951};
952
93f0822d
DB
953static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
954{
955 int max_perf = cpu->pstate.turbo_pstate;
7244cb62 956 int max_perf_adj;
93f0822d 957 int min_perf;
845c1cbe 958
51443fbf 959 if (limits->no_turbo || limits->turbo_disabled)
93f0822d
DB
960 max_perf = cpu->pstate.max_pstate;
961
e0d4c8f8
KCA
962 /*
963 * performance can be limited by user through sysfs, by cpufreq
964 * policy, or by cpu specific default values determined through
965 * experimentation.
966 */
a158bed5 967 max_perf_adj = fp_toint(max_perf * limits->max_perf);
799281a3
RW
968 *max = clamp_t(int, max_perf_adj,
969 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
93f0822d 970
a158bed5 971 min_perf = fp_toint(max_perf * limits->min_perf);
799281a3 972 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
93f0822d
DB
973}
974
fdfdb2b1 975static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate)
93f0822d 976{
b27580b0 977 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
93f0822d 978 cpu->pstate.current_pstate = pstate;
fdfdb2b1 979}
93f0822d 980
fdfdb2b1
RW
981static void intel_pstate_set_min_pstate(struct cpudata *cpu)
982{
983 int pstate = cpu->pstate.min_pstate;
984
985 intel_pstate_record_pstate(cpu, pstate);
986 /*
987 * Generally, there is no guarantee that this code will always run on
988 * the CPU being updated, so force the register update to run on the
989 * right CPU.
990 */
991 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
992 pstate_funcs.get_val(cpu, pstate));
93f0822d
DB
993}
994
93f0822d
DB
995static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
996{
016c8150
DB
997 cpu->pstate.min_pstate = pstate_funcs.get_min();
998 cpu->pstate.max_pstate = pstate_funcs.get_max();
3bcc6fa9 999 cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
016c8150 1000 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
b27580b0 1001 cpu->pstate.scaling = pstate_funcs.get_scaling();
93f0822d 1002
007bea09
DB
1003 if (pstate_funcs.get_vid)
1004 pstate_funcs.get_vid(cpu);
fdfdb2b1
RW
1005
1006 intel_pstate_set_min_pstate(cpu);
93f0822d
DB
1007}
1008
6b17ddb2 1009static inline void intel_pstate_calc_busy(struct cpudata *cpu)
93f0822d 1010{
6b17ddb2 1011 struct sample *sample = &cpu->sample;
bf810222 1012 int64_t core_pct;
93f0822d 1013
bf810222 1014 core_pct = int_tofp(sample->aperf) * int_tofp(100);
78e27086 1015 core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
e66c1768 1016
bf810222 1017 sample->core_pct_busy = (int32_t)core_pct;
93f0822d
DB
1018}
1019
4fec7ad5 1020static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
93f0822d 1021{
93f0822d 1022 u64 aperf, mperf;
4ab60c3f 1023 unsigned long flags;
4055fad3 1024 u64 tsc;
93f0822d 1025
4ab60c3f 1026 local_irq_save(flags);
93f0822d
DB
1027 rdmsrl(MSR_IA32_APERF, aperf);
1028 rdmsrl(MSR_IA32_MPERF, mperf);
e70eed2b 1029 tsc = rdtsc();
4fec7ad5 1030 if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
8e601a9f 1031 local_irq_restore(flags);
4fec7ad5 1032 return false;
8e601a9f 1033 }
4ab60c3f 1034 local_irq_restore(flags);
b69880f9 1035
c4ee841f 1036 cpu->last_sample_time = cpu->sample.time;
a4675fbc 1037 cpu->sample.time = time;
d37e2b76
DB
1038 cpu->sample.aperf = aperf;
1039 cpu->sample.mperf = mperf;
4055fad3 1040 cpu->sample.tsc = tsc;
d37e2b76
DB
1041 cpu->sample.aperf -= cpu->prev_aperf;
1042 cpu->sample.mperf -= cpu->prev_mperf;
4055fad3 1043 cpu->sample.tsc -= cpu->prev_tsc;
1abc4b20 1044
93f0822d
DB
1045 cpu->prev_aperf = aperf;
1046 cpu->prev_mperf = mperf;
4055fad3 1047 cpu->prev_tsc = tsc;
febce40f
RW
1048 /*
1049 * First time this function is invoked in a given cycle, all of the
1050 * previous sample data fields are equal to zero or stale and they must
1051 * be populated with meaningful numbers for things to work, so assume
1052 * that sample.time will always be reset before setting the utilization
1053 * update hook and make the caller skip the sample then.
1054 */
1055 return !!cpu->last_sample_time;
93f0822d
DB
1056}
1057
8fa520af
PL
1058static inline int32_t get_avg_frequency(struct cpudata *cpu)
1059{
1060 return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf *
1061 cpu->pstate.scaling, cpu->sample.mperf);
1062}
1063
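/*
 * Worked example for get_avg_frequency() with hypothetical values:
 * max_pstate_physical = 30 and scaling = 100000 kHz give a 3 GHz ceiling;
 * if APERF advanced half as fast as MPERF over the sample (aperf = 500,
 * mperf = 1000), the reported average frequency is 1500000 kHz.
 */
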
e70eed2b
PL
1064static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
1065{
1066 struct sample *sample = &cpu->sample;
63d1d656
PL
1067 u64 cummulative_iowait, delta_iowait_us;
1068 u64 delta_iowait_mperf;
1069 u64 mperf, now;
e70eed2b
PL
1070 int32_t cpu_load;
1071
63d1d656
PL
1072 cummulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now);
1073
1074 /*
1075 * Convert iowait time into number of IO cycles spent at max_freq.
1076 * IO is considered as busy only for the cpu_load algorithm. For
1077 * performance this is not needed since we always try to reach the
1078 * maximum P-State, so we are already boosting the IOs.
1079 */
1080 delta_iowait_us = cummulative_iowait - cpu->prev_cummulative_iowait;
1081 delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling *
1082 cpu->pstate.max_pstate, MSEC_PER_SEC);
1083
1084 mperf = cpu->sample.mperf + delta_iowait_mperf;
1085 cpu->prev_cummulative_iowait = cummulative_iowait;
1086
e70eed2b
PL
1087 /*
1088 * The load can be estimated as the ratio of the mperf counter
1089 * running at a constant frequency during active periods
1090 * (C0) and the time stamp counter running at the same frequency
1091 * also during C-states.
1092 */
63d1d656 1093 cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
e70eed2b
PL
1094 cpu->sample.busy_scaled = cpu_load;
1095
1096 return cpu->pstate.current_pstate - pid_calc(&cpu->pid, cpu_load);
1097}
1098
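/*
 * Worked example for get_target_pstate_use_cpu_load() with hypothetical
 * values: if mperf (plus the iowait credit) covers 40% of the TSC delta,
 * cpu_load is int_tofp(40); against the Atom setpoint of 60 the PID error
 * is positive, so the next requested P state is lower than the current one.
 */
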
157386b6 1099static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
93f0822d 1100{
c4ee841f 1101 int32_t core_busy, max_pstate, current_pstate, sample_ratio;
a4675fbc 1102 u64 duration_ns;
93f0822d 1103
7349ec04
PL
1104 intel_pstate_calc_busy(cpu);
1105
e0d4c8f8
KCA
1106 /*
1107 * core_busy is the ratio of actual performance to max
1108 * max_pstate is the max non turbo pstate available
1109 * current_pstate was the pstate that was requested during
1110 * the last sample period.
1111 *
1112 * We normalize core_busy, which was our actual percent
1113 * performance to what we requested during the last sample
1114 * period. The result will be a percentage of busy at a
1115 * specified pstate.
1116 */
d37e2b76 1117 core_busy = cpu->sample.core_pct_busy;
3bcc6fa9 1118 max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
93f0822d 1119 current_pstate = int_tofp(cpu->pstate.current_pstate);
e66c1768 1120 core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
c4ee841f 1121
e0d4c8f8 1122 /*
a4675fbc
RW
1123 * Since our utilization update callback will not run unless we are
1124 * in C0, check if the actual elapsed time is significantly greater (3x)
1125 * than our sample interval. If it is, then we were idle for a long
1126 * enough period of time to adjust our busyness.
e0d4c8f8 1127 */
a4675fbc 1128 duration_ns = cpu->sample.time - cpu->last_sample_time;
febce40f 1129 if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
a4675fbc
RW
1130 sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
1131 int_tofp(duration_ns));
c4ee841f
DB
1132 core_busy = mul_fp(core_busy, sample_ratio);
1133 }
1134
157386b6
PL
1135 cpu->sample.busy_scaled = core_busy;
1136 return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
93f0822d
DB
1137}
1138
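/*
 * Worked example for get_target_pstate_use_performance() with hypothetical
 * values: core_pct_busy = 50 with current_pstate = 15 and
 * max_pstate_physical = 30 normalizes to 50 * 30 / 15 = 100, above the 97
 * setpoint, so the P state is pushed up; a core_pct_busy of 40 would
 * normalize to 80 and pull it down.
 */
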
fdfdb2b1
RW
1139static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
1140{
1141 int max_perf, min_perf;
1142
1143 update_turbo_state();
1144
1145 intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
1146 pstate = clamp_t(int, pstate, min_perf, max_perf);
1147 if (pstate == cpu->pstate.current_pstate)
1148 return;
1149
1150 intel_pstate_record_pstate(cpu, pstate);
1151 wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
1152}
1153
93f0822d
DB
1154static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
1155{
157386b6 1156 int from, target_pstate;
4055fad3
DS
1157 struct sample *sample;
1158
1159 from = cpu->pstate.current_pstate;
93f0822d 1160
157386b6 1161 target_pstate = pstate_funcs.get_target_pstate(cpu);
93f0822d 1162
fdfdb2b1 1163 intel_pstate_update_pstate(cpu, target_pstate);
4055fad3
DS
1164
1165 sample = &cpu->sample;
1166 trace_pstate_sample(fp_toint(sample->core_pct_busy),
157386b6 1167 fp_toint(sample->busy_scaled),
4055fad3
DS
1168 from,
1169 cpu->pstate.current_pstate,
1170 sample->mperf,
1171 sample->aperf,
1172 sample->tsc,
8fa520af 1173 get_avg_frequency(cpu));
93f0822d
DB
1174}
1175
a4675fbc
RW
1176static void intel_pstate_update_util(struct update_util_data *data, u64 time,
1177 unsigned long util, unsigned long max)
93f0822d 1178{
a4675fbc
RW
1179 struct cpudata *cpu = container_of(data, struct cpudata, update_util);
1180 u64 delta_ns = time - cpu->sample.time;
b69880f9 1181
a4675fbc 1182 if ((s64)delta_ns >= pid_params.sample_rate_ns) {
4fec7ad5
RW
1183 bool sample_taken = intel_pstate_sample(cpu, time);
1184
1185 if (sample_taken && !hwp_active)
a4675fbc
RW
1186 intel_pstate_adjust_busy_pstate(cpu);
1187 }
93f0822d
DB
1188}
1189
1190#define ICPU(model, policy) \
6cbd7ee1
DB
1191 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
1192 (unsigned long)&policy }
93f0822d
DB
1193
1194static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
016c8150
DB
1195 ICPU(0x2a, core_params),
1196 ICPU(0x2d, core_params),
1421df63 1197 ICPU(0x37, silvermont_params),
016c8150
DB
1198 ICPU(0x3a, core_params),
1199 ICPU(0x3c, core_params),
c7e241df 1200 ICPU(0x3d, core_params),
016c8150
DB
1201 ICPU(0x3e, core_params),
1202 ICPU(0x3f, core_params),
1203 ICPU(0x45, core_params),
1204 ICPU(0x46, core_params),
43f8a966 1205 ICPU(0x47, core_params),
1421df63 1206 ICPU(0x4c, airmont_params),
7ab0256e 1207 ICPU(0x4e, core_params),
c7e241df 1208 ICPU(0x4f, core_params),
1c939123 1209 ICPU(0x5e, core_params),
c7e241df 1210 ICPU(0x56, core_params),
b34ef932 1211 ICPU(0x57, knl_params),
93f0822d
DB
1212 {}
1213};
1214MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
1215
2f86dc4c
DB
1216static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
1217 ICPU(0x56, core_params),
1218 {}
1219};
1220
93f0822d
DB
1221static int intel_pstate_init_cpu(unsigned int cpunum)
1222{
93f0822d
DB
1223 struct cpudata *cpu;
1224
c0348717
DB
1225 if (!all_cpu_data[cpunum])
1226 all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
1227 GFP_KERNEL);
93f0822d
DB
1228 if (!all_cpu_data[cpunum])
1229 return -ENOMEM;
1230
1231 cpu = all_cpu_data[cpunum];
1232
93f0822d 1233 cpu->cpu = cpunum;
ba88d433 1234
a4675fbc 1235 if (hwp_active) {
ba88d433 1236 intel_pstate_hwp_enable(cpu);
a4675fbc
RW
1237 pid_params.sample_rate_ms = 50;
1238 pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
1239 }
ba88d433 1240
179e8471 1241 intel_pstate_get_cpu_pstates(cpu);
016c8150 1242
93f0822d 1243 intel_pstate_busy_pid_reset(cpu);
93f0822d 1244
a4675fbc 1245 cpu->update_util.func = intel_pstate_update_util;
93f0822d 1246
f16255eb 1247 pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
93f0822d
DB
1248
1249 return 0;
1250}
1251
1252static unsigned int intel_pstate_get(unsigned int cpu_num)
1253{
1254 struct sample *sample;
1255 struct cpudata *cpu;
1256
1257 cpu = all_cpu_data[cpu_num];
1258 if (!cpu)
1259 return 0;
d37e2b76 1260 sample = &cpu->sample;
8fa520af 1261 return get_avg_frequency(cpu);
93f0822d
DB
1262}
1263
febce40f 1264static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
bb6ab52f 1265{
febce40f
RW
1266 struct cpudata *cpu = all_cpu_data[cpu_num];
1267
1268 /* Prevent intel_pstate_update_util() from using stale data. */
1269 cpu->sample.time = 0;
1270 cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
bb6ab52f
RW
1271}
1272
1273static void intel_pstate_clear_update_util_hook(unsigned int cpu)
1274{
1275 cpufreq_set_update_util_data(cpu, NULL);
1276 synchronize_sched();
1277}
1278
30a39153
SP
1279static void intel_pstate_set_performance_limits(struct perf_limits *limits)
1280{
1281 limits->no_turbo = 0;
1282 limits->turbo_disabled = 0;
1283 limits->max_perf_pct = 100;
1284 limits->max_perf = int_tofp(1);
1285 limits->min_perf_pct = 100;
1286 limits->min_perf = int_tofp(1);
1287 limits->max_policy_pct = 100;
1288 limits->max_sysfs_pct = 100;
1289 limits->min_policy_pct = 0;
1290 limits->min_sysfs_pct = 0;
1291}
1292
93f0822d
DB
1293static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1294{
d3929b83
DB
1295 if (!policy->cpuinfo.max_freq)
1296 return -ENODEV;
1297
bb6ab52f
RW
1298 intel_pstate_clear_update_util_hook(policy->cpu);
1299
30a39153 1300 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
51443fbf 1301 limits = &performance_limits;
30a39153
SP
1302 if (policy->max >= policy->cpuinfo.max_freq) {
1303 pr_debug("intel_pstate: set performance\n");
1304 intel_pstate_set_performance_limits(limits);
1305 goto out;
1306 }
1307 } else {
1308 pr_debug("intel_pstate: set powersave\n");
1309 limits = &powersave_limits;
93f0822d 1310 }
2f86dc4c 1311
51443fbf
PB
1312 limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
1313 limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
8478f539
PB
1314 limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
1315 policy->cpuinfo.max_freq);
51443fbf 1316 limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100);
43717aad
CY
1317
1318 /* Normalize user input to [min_policy_pct, max_policy_pct] */
51443fbf
PB
1319 limits->min_perf_pct = max(limits->min_policy_pct,
1320 limits->min_sysfs_pct);
1321 limits->min_perf_pct = min(limits->max_policy_pct,
1322 limits->min_perf_pct);
1323 limits->max_perf_pct = min(limits->max_policy_pct,
1324 limits->max_sysfs_pct);
1325 limits->max_perf_pct = max(limits->min_policy_pct,
1326 limits->max_perf_pct);
88b7b7c0 1327 limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
43717aad
CY
1328
1329 /* Make sure min_perf_pct <= max_perf_pct */
51443fbf 1330 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
43717aad 1331
51443fbf
PB
1332 limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
1333 int_tofp(100));
1334 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
1335 int_tofp(100));
93f0822d 1336
bb6ab52f
RW
1337 out:
1338 intel_pstate_set_update_util_hook(policy->cpu);
1339
2f86dc4c 1340 if (hwp_active)
41cfd64c 1341 intel_pstate_hwp_set(policy->cpus);
2f86dc4c 1342
93f0822d
DB
1343 return 0;
1344}
1345
1346static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
1347{
be49e346 1348 cpufreq_verify_within_cpu_limits(policy);
93f0822d 1349
285cb990 1350 if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
c410833a 1351 policy->policy != CPUFREQ_POLICY_PERFORMANCE)
93f0822d
DB
1352 return -EINVAL;
1353
1354 return 0;
1355}
1356
bb18008f 1357static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
93f0822d 1358{
bb18008f
DB
1359 int cpu_num = policy->cpu;
1360 struct cpudata *cpu = all_cpu_data[cpu_num];
93f0822d 1361
f16255eb 1362 pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
bb18008f 1363
bb6ab52f 1364 intel_pstate_clear_update_util_hook(cpu_num);
a4675fbc 1365
2f86dc4c
DB
1366 if (hwp_active)
1367 return;
1368
fdfdb2b1 1369 intel_pstate_set_min_pstate(cpu);
93f0822d
DB
1370}
1371
2760984f 1372static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
93f0822d 1373{
93f0822d 1374 struct cpudata *cpu;
52e0a509 1375 int rc;
93f0822d
DB
1376
1377 rc = intel_pstate_init_cpu(policy->cpu);
1378 if (rc)
1379 return rc;
1380
1381 cpu = all_cpu_data[policy->cpu];
1382
51443fbf 1383 if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
93f0822d
DB
1384 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
1385 else
1386 policy->policy = CPUFREQ_POLICY_POWERSAVE;
1387
b27580b0
DB
1388 policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
1389 policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
93f0822d
DB
1390
1391 /* cpuinfo and default policy values */
b27580b0
DB
1392 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
1393 policy->cpuinfo.max_freq =
1394 cpu->pstate.turbo_pstate * cpu->pstate.scaling;
93f0822d
DB
1395 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
1396 cpumask_set_cpu(policy->cpu, policy->cpus);
1397
1398 return 0;
1399}
1400
1401static struct cpufreq_driver intel_pstate_driver = {
1402 .flags = CPUFREQ_CONST_LOOPS,
1403 .verify = intel_pstate_verify_policy,
1404 .setpolicy = intel_pstate_set_policy,
1405 .get = intel_pstate_get,
1406 .init = intel_pstate_cpu_init,
bb18008f 1407 .stop_cpu = intel_pstate_stop_cpu,
93f0822d 1408 .name = "intel_pstate",
93f0822d
DB
1409};
1410
6be26498 1411static int __initdata no_load;
2f86dc4c 1412static int __initdata no_hwp;
d64c3b0b 1413static int __initdata hwp_only;
aa4ea34d 1414static unsigned int force_load;
6be26498 1415
b563b4e3
DB
1416static int intel_pstate_msrs_not_valid(void)
1417{
016c8150 1418 if (!pstate_funcs.get_max() ||
c410833a
SK
1419 !pstate_funcs.get_min() ||
1420 !pstate_funcs.get_turbo())
b563b4e3
DB
1421 return -ENODEV;
1422
b563b4e3
DB
1423 return 0;
1424}
016c8150 1425
e0a261a2 1426static void copy_pid_params(struct pstate_adjust_policy *policy)
016c8150
DB
1427{
1428 pid_params.sample_rate_ms = policy->sample_rate_ms;
a4675fbc 1429 pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
016c8150
DB
1430 pid_params.p_gain_pct = policy->p_gain_pct;
1431 pid_params.i_gain_pct = policy->i_gain_pct;
1432 pid_params.d_gain_pct = policy->d_gain_pct;
1433 pid_params.deadband = policy->deadband;
1434 pid_params.setpoint = policy->setpoint;
1435}
1436
e0a261a2 1437static void copy_cpu_funcs(struct pstate_funcs *funcs)
016c8150
DB
1438{
1439 pstate_funcs.get_max = funcs->get_max;
3bcc6fa9 1440 pstate_funcs.get_max_physical = funcs->get_max_physical;
016c8150
DB
1441 pstate_funcs.get_min = funcs->get_min;
1442 pstate_funcs.get_turbo = funcs->get_turbo;
b27580b0 1443 pstate_funcs.get_scaling = funcs->get_scaling;
fdfdb2b1 1444 pstate_funcs.get_val = funcs->get_val;
007bea09 1445 pstate_funcs.get_vid = funcs->get_vid;
157386b6
PL
1446 pstate_funcs.get_target_pstate = funcs->get_target_pstate;
1447
016c8150
DB
1448}
1449
fbbcdc07 1450#if IS_ENABLED(CONFIG_ACPI)
6ee11e41 1451#include <acpi/processor.h>
fbbcdc07
AH
1452
1453static bool intel_pstate_no_acpi_pss(void)
1454{
1455 int i;
1456
1457 for_each_possible_cpu(i) {
1458 acpi_status status;
1459 union acpi_object *pss;
1460 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1461 struct acpi_processor *pr = per_cpu(processors, i);
1462
1463 if (!pr)
1464 continue;
1465
1466 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
1467 if (ACPI_FAILURE(status))
1468 continue;
1469
1470 pss = buffer.pointer;
1471 if (pss && pss->type == ACPI_TYPE_PACKAGE) {
1472 kfree(pss);
1473 return false;
1474 }
1475
1476 kfree(pss);
1477 }
1478
1479 return true;
1480}
1481
966916ea 1482static bool intel_pstate_has_acpi_ppc(void)
1483{
1484 int i;
1485
1486 for_each_possible_cpu(i) {
1487 struct acpi_processor *pr = per_cpu(processors, i);
1488
1489 if (!pr)
1490 continue;
1491 if (acpi_has_method(pr->handle, "_PPC"))
1492 return true;
1493 }
1494 return false;
1495}
1496
1497enum {
1498 PSS,
1499 PPC,
1500};
1501
fbbcdc07
AH
1502struct hw_vendor_info {
1503 u16 valid;
1504 char oem_id[ACPI_OEM_ID_SIZE];
1505 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
966916ea 1506 int oem_pwr_table;
fbbcdc07
AH
1507};
1508
1509/* Hardware vendor-specific info that has its own power management modes */
1510static struct hw_vendor_info vendor_info[] = {
966916ea 1511 {1, "HP ", "ProLiant", PSS},
1512 {1, "ORACLE", "X4-2 ", PPC},
1513 {1, "ORACLE", "X4-2L ", PPC},
1514 {1, "ORACLE", "X4-2B ", PPC},
1515 {1, "ORACLE", "X3-2 ", PPC},
1516 {1, "ORACLE", "X3-2L ", PPC},
1517 {1, "ORACLE", "X3-2B ", PPC},
1518 {1, "ORACLE", "X4470M2 ", PPC},
1519 {1, "ORACLE", "X4270M3 ", PPC},
1520 {1, "ORACLE", "X4270M2 ", PPC},
1521 {1, "ORACLE", "X4170M2 ", PPC},
5aecc3c8
EZ
1522 {1, "ORACLE", "X4170 M3", PPC},
1523 {1, "ORACLE", "X4275 M3", PPC},
1524 {1, "ORACLE", "X6-2 ", PPC},
1525 {1, "ORACLE", "Sudbury ", PPC},
fbbcdc07
AH
1526 {0, "", ""},
1527};
1528
1529static bool intel_pstate_platform_pwr_mgmt_exists(void)
1530{
1531 struct acpi_table_header hdr;
1532 struct hw_vendor_info *v_info;
2f86dc4c
DB
1533 const struct x86_cpu_id *id;
1534 u64 misc_pwr;
1535
1536 id = x86_match_cpu(intel_pstate_cpu_oob_ids);
1537 if (id) {
1538 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
1539 if ( misc_pwr & (1 << 8))
1540 return true;
1541 }
fbbcdc07 1542
c410833a
SK
1543 if (acpi_disabled ||
1544 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
fbbcdc07
AH
1545 return false;
1546
1547 for (v_info = vendor_info; v_info->valid; v_info++) {
c410833a 1548 if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
966916ea 1549 !strncmp(hdr.oem_table_id, v_info->oem_table_id,
1550 ACPI_OEM_TABLE_ID_SIZE))
1551 switch (v_info->oem_pwr_table) {
1552 case PSS:
1553 return intel_pstate_no_acpi_pss();
1554 case PPC:
aa4ea34d
EZ
1555 return intel_pstate_has_acpi_ppc() &&
1556 (!force_load);
966916ea 1557 }
fbbcdc07
AH
1558 }
1559
1560 return false;
1561}
1562#else /* CONFIG_ACPI not enabled */
1563static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
966916ea 1564static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
fbbcdc07
AH
1565#endif /* CONFIG_ACPI */
1566
7791e4aa
SP
1567static const struct x86_cpu_id hwp_support_ids[] __initconst = {
1568 { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
1569 {}
1570};
1571
93f0822d
DB
1572static int __init intel_pstate_init(void)
1573{
907cc908 1574 int cpu, rc = 0;
93f0822d 1575 const struct x86_cpu_id *id;
64df1fdf 1576 struct cpu_defaults *cpu_def;
93f0822d 1577
6be26498
DB
1578 if (no_load)
1579 return -ENODEV;
1580
7791e4aa
SP
1581 if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
1582 copy_cpu_funcs(&core_params.funcs);
1583 hwp_active++;
1584 goto hwp_cpu_matched;
1585 }
1586
93f0822d
DB
1587 id = x86_match_cpu(intel_pstate_cpu_ids);
1588 if (!id)
1589 return -ENODEV;
1590
64df1fdf 1591 cpu_def = (struct cpu_defaults *)id->driver_data;
016c8150 1592
64df1fdf
BP
1593 copy_pid_params(&cpu_def->pid_policy);
1594 copy_cpu_funcs(&cpu_def->funcs);
016c8150 1595
b563b4e3
DB
1596 if (intel_pstate_msrs_not_valid())
1597 return -ENODEV;
1598
7791e4aa
SP
1599hwp_cpu_matched:
1600 /*
1601 * The Intel pstate driver will be ignored if the platform
1602 * firmware has its own power management modes.
1603 */
1604 if (intel_pstate_platform_pwr_mgmt_exists())
1605 return -ENODEV;
1606
93f0822d
DB
1607 pr_info("Intel P-state driver initializing.\n");
1608
b57ffac5 1609 all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
93f0822d
DB
1610 if (!all_cpu_data)
1611 return -ENOMEM;
93f0822d 1612
d64c3b0b
KCA
1613 if (!hwp_active && hwp_only)
1614 goto out;
1615
93f0822d
DB
1616 rc = cpufreq_register_driver(&intel_pstate_driver);
1617 if (rc)
1618 goto out;
1619
1620 intel_pstate_debug_expose_params();
1621 intel_pstate_sysfs_expose_params();
b69880f9 1622
7791e4aa
SP
1623 if (hwp_active)
1624 pr_info("intel_pstate: HWP enabled\n");
1625
93f0822d
DB
1626 return rc;
1627out:
907cc908
DB
1628 get_online_cpus();
1629 for_each_online_cpu(cpu) {
1630 if (all_cpu_data[cpu]) {
bb6ab52f 1631 intel_pstate_clear_update_util_hook(cpu);
907cc908
DB
1632 kfree(all_cpu_data[cpu]);
1633 }
1634 }
1635
1636 put_online_cpus();
1637 vfree(all_cpu_data);
93f0822d
DB
1638 return -ENODEV;
1639}
1640device_initcall(intel_pstate_init);
1641
6be26498
DB
1642static int __init intel_pstate_setup(char *str)
1643{
1644 if (!str)
1645 return -EINVAL;
1646
1647 if (!strcmp(str, "disable"))
1648 no_load = 1;
539342f6
PB
1649 if (!strcmp(str, "no_hwp")) {
1650 pr_info("intel_pstate: HWP disabled\n");
2f86dc4c 1651 no_hwp = 1;
539342f6 1652 }
aa4ea34d
EZ
1653 if (!strcmp(str, "force"))
1654 force_load = 1;
d64c3b0b
KCA
1655 if (!strcmp(str, "hwp_only"))
1656 hwp_only = 1;
6be26498
DB
1657 return 0;
1658}
1659early_param("intel_pstate", intel_pstate_setup);
1660
93f0822d
DB
1661MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
1662MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
1663MODULE_LICENSE("GPL");