/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>

#include "cpufreq_governor.h"

/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

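/*
 * Unit conventions: the *_UP_THRESHOLD values are load percentages
 * (MIN/MAX bound what userspace may write to up_threshold), and the
 * sampling rates are expressed in microseconds.
 */
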
static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);

static struct od_ops od_ops;

static unsigned int default_powersave_bias;

static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_delay_us,
 * freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
 */
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int delay_hi_us;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						      policy->cpu);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_delay_us = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_delay_us = 0;
		return freq_lo;
	}
	delay_hi_us = (freq_avg - freq_lo) * dbs_data->sampling_rate;
	delay_hi_us += (freq_hi - freq_lo) / 2;
	delay_hi_us /= freq_hi - freq_lo;
	dbs_info->freq_hi_delay_us = delay_hi_us;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_delay_us = dbs_data->sampling_rate - delay_hi_us;
	return freq_hi;
}

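/*
 * Worked example of the split above, with illustrative numbers (not taken
 * from any real frequency table): for powersave_bias = 100 (10%), a request
 * of freq_req = 2000 MHz is reduced to freq_avg = 1800 MHz.  If the table
 * brackets that value with freq_lo = 1600 MHz and freq_hi = 2000 MHz, then
 * for a 10000 us sampling period:
 *
 *	delay_hi_us = (1800 - 1600) * 10000 / (2000 - 1600) = 5000 us
 *
 * so the CPU spends 5000 us at freq_hi and 5000 us at freq_lo, averaging
 * roughly the biased 1800 MHz target.
 */
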
static void ondemand_powersave_bias_init(void)
{
	int i;

	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(policy, freq,
				CPUFREQ_RELATION_H);
	else if (policy->cur == policy->max)
		return;

	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

/*
 * Every sampling_rate, we check if the current idle time is less than 20%
 * (default). If it is, we try to increase the frequency; otherwise, we
 * adjust the frequency proportionally to the load.
 */
static void od_update(struct cpufreq_policy *policy)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
	struct policy_dbs_info *policy_dbs = dbs_info->cdbs.policy_dbs;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			policy_dbs->rate_mult = dbs_data->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next, min_f, max_f;

		min_f = policy->cpuinfo.min_freq;
		max_f = policy->cpuinfo.max_freq;
		freq_next = min_f + load * (max_f - min_f) / 100;

		/* No longer fully busy, reset rate_mult */
		policy_dbs->rate_mult = 1;

		if (od_tuners->powersave_bias)
			freq_next = od_ops.powersave_bias_target(policy,
								 freq_next,
								 CPUFREQ_RELATION_L);

		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
	}
}

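/*
 * Illustration of the proportional path above, with example numbers: for
 * cpuinfo.min_freq = 800 MHz, cpuinfo.max_freq = 2800 MHz and a measured
 * load of 40, od_update() requests
 *
 *	freq_next = 800 + 40 * (2800 - 800) / 100 = 1600 MHz,
 *
 * which CPUFREQ_RELATION_C then maps to the closest supported frequency.
 */
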
static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
	int sample_type = dbs_info->sample_type;

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	/*
	 * OD_SUB_SAMPLE doesn't make sense if sample_delay_ns is 0, so ignore
	 * it then.
	 */
	if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
		__cpufreq_driver_target(policy, dbs_info->freq_lo,
					CPUFREQ_RELATION_H);
		return dbs_info->freq_lo_delay_us;
	}

	od_update(policy);

	if (dbs_info->freq_lo) {
		/* Setup timer for SUB_SAMPLE */
		dbs_info->sample_type = OD_SUB_SAMPLE;
		return dbs_info->freq_hi_delay_us;
	}

	return dbs_data->sampling_rate * policy_dbs->rate_mult;
}

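/*
 * Summary of the sample cycle: a NORMAL_SAMPLE runs od_update().  If
 * powersave_bias made od_update() pick a freq_hi/freq_lo pair, the timer is
 * re-armed as a SUB_SAMPLE for freq_hi_delay_us, after which the frequency
 * is dropped to freq_lo for freq_lo_delay_us, so the average over the full
 * period approximates the biased target.
 */
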
/************************** sysfs interface ************************/
static struct dbs_governor od_dbs_gov;

static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
				size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	od_tuners->io_is_busy = !!input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
							      j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
	}
	return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
				  size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	dbs_data->up_threshold = input;
	return count;
}

static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct policy_dbs_info *policy_dbs;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		/*
		 * Doing this without locking might lead to using different
		 * rate_mult values in od_update() and od_dbs_timer().
		 */
		mutex_lock(&policy_dbs->timer_mutex);
		policy_dbs->rate_mult = 1;
		mutex_unlock(&policy_dbs->timer_mutex);
	}

	return count;
}

static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
				      const char *buf, size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) { /* nothing to do */
		return count;
	}
	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info;

		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
		if (dbs_data->ignore_nice_load)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}

static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
				    size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	od_tuners->powersave_bias = input;
	ondemand_powersave_bias_init();
	return count;
}

gov_show_one_common(sampling_rate);
gov_show_one_common(up_threshold);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(ignore_nice_load);
gov_show_one_common(min_sampling_rate);
gov_show_one(od, io_is_busy);
gov_show_one(od, powersave_bias);

gov_attr_rw(sampling_rate);
gov_attr_rw(io_is_busy);
gov_attr_rw(up_threshold);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(powersave_bias);
gov_attr_ro(min_sampling_rate);

static struct attribute *od_attributes[] = {
	&min_sampling_rate.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};

/************************** sysfs end ************************/

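/*
 * Runtime tuning goes through the attributes above.  For example (global
 * tunables directory shown; with per-policy governors the equivalent files
 * sit under .../cpufreq/policyN/ondemand/):
 *
 *	# echo 90 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
 *	# echo 100 > /sys/devices/system/cpu/cpufreq/ondemand/powersave_bias
 *
 * up_threshold is a load percentage; powersave_bias is in units of 0.1%
 * (0..1000) of the requested frequency.
 */
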
static int od_init(struct dbs_data *dbs_data, bool notify)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	tuners->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	return 0;
}

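/*
 * For the non-micro-accounting fallback above: MIN_SAMPLING_RATE_RATIO
 * comes from cpufreq_governor.h (2 at the time of writing), so with
 * HZ = 250, for example, the floor works out to
 * 2 * jiffies_to_usecs(10) = 2 * 40000 = 80000 us per sample.
 */
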
static void od_exit(struct dbs_data *dbs_data, bool notify)
{
	kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(od_cpu_dbs_info);

static struct od_ops od_ops = {
	.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
	.powersave_bias_target = generic_powersave_bias_target,
};

static struct dbs_governor od_dbs_gov = {
	.gov = {
		.name = "ondemand",
		.governor = cpufreq_governor_dbs,
		.max_transition_latency = TRANSITION_LATENCY_LIMIT,
		.owner = THIS_MODULE,
	},
	.governor = GOV_ONDEMAND,
	.kobj_type = { .default_attrs = od_attributes },
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = od_dbs_timer,
	.gov_ops = &od_ops,
	.init = od_init,
	.exit = od_exit,
};

#define CPU_FREQ_GOV_ONDEMAND	(&od_dbs_gov.gov)

static void od_set_powersave_bias(unsigned int powersave_bias)
{
	struct cpufreq_policy *policy;
	struct dbs_data *dbs_data;
	struct od_dbs_tuners *od_tuners;
	unsigned int cpu;
	cpumask_t done;

	default_powersave_bias = powersave_bias;
	cpumask_clear(&done);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct policy_dbs_info *policy_dbs;

		if (cpumask_test_cpu(cpu, &done))
			continue;

		policy_dbs = per_cpu(od_cpu_dbs_info, cpu).cdbs.policy_dbs;
		if (!policy_dbs)
			continue;

		policy = policy_dbs->policy;
		cpumask_or(&done, &done, policy->cpus);

		if (policy->governor != CPU_FREQ_GOV_ONDEMAND)
			continue;

		dbs_data = policy_dbs->dbs_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	put_online_cpus();
}

void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);

void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);

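/*
 * These two hooks let another driver substitute its own powersave-bias
 * target function; amd_freq_sensitivity (drivers/cpufreq/
 * amd_freq_sensitivity.c), for example, registers a handler driven by the
 * CPU's frequency-sensitivity feedback.  Unregistering restores
 * generic_powersave_bias_target() and clears the bias.
 */
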
static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(CPU_FREQ_GOV_ONDEMAND);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(CPU_FREQ_GOV_ONDEMAND);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_ONDEMAND;
}

fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);