/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *          Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include "cpufreq_governor.h"

/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
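
/*
 * Units, for reference: the up_threshold values are load percentages,
 * the sampling rates are in microseconds (so the micro-accounting floor
 * is 10 ms), and powersave_bias is in thousandths: a bias of 100 reduces
 * the requested frequency by 10% (see generic_powersave_bias_target()).
 */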

static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);

static struct od_ops od_ops;

static unsigned int default_powersave_bias;

static void ondemand_powersave_bias_init_cpu(int cpu)
{
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

        dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
        dbs_info->freq_lo = 0;
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
        /*
         * For Intel, Core 2 (model 15) and later have an efficient idle.
         */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
                        boot_cpu_data.x86 == 6 &&
                        boot_cpu_data.x86_model >= 15)
                return 1;
#endif
        return 0;
}
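
/*
 * Note: family 6, model 15 is the Core 2 (Merom/Conroe) generation, so the
 * check above effectively whitelists Core 2 and newer Intel parts.
 */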

/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
                unsigned int freq_next, unsigned int relation)
{
        unsigned int freq_req, freq_reduc, freq_avg;
        unsigned int freq_hi, freq_lo;
        unsigned int index = 0;
        unsigned int jiffies_total, jiffies_hi, jiffies_lo;
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
                                                      policy->cpu);
        struct dbs_data *dbs_data = policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;

        if (!dbs_info->freq_table) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_next;
        }

        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
                        relation, &index);
        freq_req = dbs_info->freq_table[index].frequency;
        freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
        freq_avg = freq_req - freq_reduc;

        /* Find freq bounds for freq_avg in freq_table */
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                        CPUFREQ_RELATION_H, &index);
        freq_lo = dbs_info->freq_table[index].frequency;
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                        CPUFREQ_RELATION_L, &index);
        freq_hi = dbs_info->freq_table[index].frequency;

        /* Find out how long we have to be in hi and lo freqs */
        if (freq_hi == freq_lo) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_lo;
        }
        jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
        jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
        jiffies_hi += ((freq_hi - freq_lo) / 2);
        jiffies_hi /= (freq_hi - freq_lo);
        jiffies_lo = jiffies_total - jiffies_hi;
        dbs_info->freq_lo = freq_lo;
        dbs_info->freq_lo_jiffies = jiffies_lo;
        dbs_info->freq_hi_jiffies = jiffies_hi;
        return freq_hi;
}
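
/*
 * Worked example of the hi/lo split above (illustrative numbers): with a
 * 10-jiffy sampling period, freq_lo = 1000000 kHz, freq_hi = 1500000 kHz
 * and freq_avg = 1400000 kHz, the rounded split is
 * jiffies_hi = (400000 * 10 + 250000) / 500000 = 8 and jiffies_lo = 2,
 * so running 8 jiffies at freq_hi and 2 at freq_lo averages back to the
 * requested 1400000 kHz.
 */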

static void ondemand_powersave_bias_init(void)
{
        int i;

        for_each_online_cpu(i) {
                ondemand_powersave_bias_init_cpu(i);
        }
}

static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
        struct dbs_data *dbs_data = policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;

        if (od_tuners->powersave_bias)
                freq = od_ops.powersave_bias_target(policy, freq,
                                CPUFREQ_RELATION_H);
        else if (policy->cur == policy->max)
                return;

        __cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
                        CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

/*
 * Every sampling_rate, we check if the current idle time is less than 20%
 * (default). If it is, we try to increase the frequency; otherwise, we
 * adjust the frequency proportionally to the load.
 */
static void od_check_cpu(int cpu, unsigned int load)
{
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
        struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
        struct dbs_data *dbs_data = policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;

        dbs_info->freq_lo = 0;

        /* Check for frequency increase */
        if (load > od_tuners->up_threshold) {
                /* If switching to max speed, apply sampling_down_factor */
                if (policy->cur < policy->max)
                        dbs_info->rate_mult =
                                od_tuners->sampling_down_factor;
                dbs_freq_increase(policy, policy->max);
        } else {
                /* Calculate the next frequency proportional to load */
                unsigned int freq_next, min_f, max_f;

                min_f = policy->cpuinfo.min_freq;
                max_f = policy->cpuinfo.max_freq;
                freq_next = min_f + load * (max_f - min_f) / 100;

                /* No longer fully busy, reset rate_mult */
                dbs_info->rate_mult = 1;

                if (!od_tuners->powersave_bias) {
                        __cpufreq_driver_target(policy, freq_next,
                                        CPUFREQ_RELATION_C);
                        return;
                }

                freq_next = od_ops.powersave_bias_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
        }
}
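
/*
 * Example of the load-proportional mapping above (illustrative numbers):
 * with cpuinfo.min_freq = 800000 kHz, cpuinfo.max_freq = 2400000 kHz and
 * a measured load of 50, freq_next = 800000 + 50 * 1600000 / 100 =
 * 1600000 kHz; CPUFREQ_RELATION_C then picks the table entry closest to
 * that value.
 */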

static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
{
        struct dbs_data *dbs_data = policy->governor_data;
        unsigned int cpu = policy->cpu;
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        int delay = 0, sample_type = dbs_info->sample_type;

        /* Common NORMAL_SAMPLE setup */
        dbs_info->sample_type = OD_NORMAL_SAMPLE;
        if (sample_type == OD_SUB_SAMPLE) {
                delay = dbs_info->freq_lo_jiffies;
                __cpufreq_driver_target(policy, dbs_info->freq_lo,
                                        CPUFREQ_RELATION_H);
        } else {
                dbs_check_cpu(dbs_data, cpu);
                if (dbs_info->freq_lo) {
                        /* Setup timer for SUB_SAMPLE */
                        dbs_info->sample_type = OD_SUB_SAMPLE;
                        delay = dbs_info->freq_hi_jiffies;
                }
        }

        if (!delay)
                delay = delay_for_sampling_rate(od_tuners->sampling_rate
                                * dbs_info->rate_mult);

        return delay;
}
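
/*
 * With powersave_bias in effect, each sampling period is thus split in two:
 * a NORMAL sample runs at freq_hi and arms a SUB_SAMPLE after
 * freq_hi_jiffies, and the SUB_SAMPLE then drops to freq_lo for the
 * remaining freq_lo_jiffies before the next NORMAL sample re-evaluates
 * the load.
 */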

/************************** sysfs interface ************************/
static struct dbs_governor od_dbs_gov;

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the user requests 10 ms because an
 * immediate reaction from the ondemand governor is needed, the governor may
 * pick up the new rate too late; up to 1 second later. Thus, if we are
 * reducing the sampling rate, we need to make the new value effective
 * immediately.
 */
static void update_sampling_rate(struct dbs_data *dbs_data,
                unsigned int new_rate)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        struct cpumask cpumask;
        int cpu;

        od_tuners->sampling_rate = new_rate = max(new_rate,
                        dbs_data->min_sampling_rate);

        /*
         * Lock governor so that governor start/stop can't execute in parallel.
         */
        mutex_lock(&dbs_data_mutex);

        cpumask_copy(&cpumask, cpu_online_mask);

        for_each_cpu(cpu, &cpumask) {
                struct cpufreq_policy *policy;
                struct od_cpu_dbs_info_s *dbs_info;
                struct cpu_dbs_info *cdbs;
                struct cpu_common_dbs_info *shared;

                dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
                cdbs = &dbs_info->cdbs;
                shared = cdbs->shared;

                /*
                 * A valid shared and shared->policy means governor hasn't
                 * stopped or exited yet.
                 */
                if (!shared || !shared->policy)
                        continue;

                policy = shared->policy;

                /* clear all CPUs of this policy */
                cpumask_andnot(&cpumask, &cpumask, policy->cpus);

                /*
                 * Update sampling rate for CPUs whose policy is governed by
                 * dbs_data. In case of governor_per_policy, only a single
                 * policy will be governed by dbs_data, otherwise there can be
                 * multiple policies that are governed by the same dbs_data.
                 */
                if (dbs_data == policy->governor_data) {
                        mutex_lock(&shared->timer_mutex);
                        /*
                         * On 32-bit architectures this may race with the
                         * sample_delay_ns read in dbs_update_util_handler(),
                         * but that really doesn't matter. If the read returns
                         * a value that's too big, the sample will be skipped,
                         * but the next invocation of dbs_update_util_handler()
                         * (when the update has been completed) will take a
                         * sample. If the returned value is too small, the
                         * sample will be taken immediately, but that isn't a
                         * problem, as we want the new rate to take effect
                         * immediately anyway.
                         *
                         * If this runs in parallel with dbs_work_handler(), we
                         * may end up overwriting the sample_delay_ns value that
                         * it has just written, but the difference should not be
                         * too big and it will be corrected next time a sample
                         * is taken, so it shouldn't be significant.
                         */
                        gov_update_sample_delay(shared, new_rate);
                        mutex_unlock(&shared->timer_mutex);
                }
        }

        mutex_unlock(&dbs_data_mutex);
}

static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        update_sampling_rate(dbs_data, input);
        return count;
}

static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        unsigned int j;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;
        od_tuners->io_is_busy = !!input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
                                                              j);
                dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
                        &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
        }
        return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
                        input < MIN_FREQUENCY_UP_THRESHOLD) {
                return -EINVAL;
        }

        od_tuners->up_threshold = input;
        return count;
}

static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
                const char *buf, size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input, j;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                return -EINVAL;
        od_tuners->sampling_down_factor = input;

        /* Reset down sampling multiplier in case it was active */
        for_each_online_cpu(j) {
                struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
                                                              j);
                dbs_info->rate_mult = 1;
        }
        return count;
}

static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
                const char *buf, size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        unsigned int j;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        if (input == od_tuners->ignore_nice_load) { /* nothing to do */
                return count;
        }
        od_tuners->ignore_nice_load = input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct od_cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(od_cpu_dbs_info, j);
                dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
                        &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
                if (od_tuners->ignore_nice_load)
                        dbs_info->cdbs.prev_cpu_nice =
                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
        }
        return count;
}

static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1000)
                input = 1000;

        od_tuners->powersave_bias = input;
        ondemand_powersave_bias_init();
        return count;
}

show_store_one(od, sampling_rate);
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
show_store_one(od, ignore_nice_load);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);
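
/*
 * Usage note: with the system-wide governor these tunables typically appear
 * under /sys/devices/system/cpu/cpufreq/ondemand/, while drivers that set
 * CPUFREQ_HAVE_GOVERNOR_PER_POLICY expose them under each policy instead,
 * e.g. /sys/devices/system/cpu/cpu0/cpufreq/ondemand/. A typical tweak:
 *
 *	echo 90 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
 */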

static struct attribute *dbs_attributes_gov_sys[] = {
        &sampling_rate_min_gov_sys.attr,
        &sampling_rate_gov_sys.attr,
        &up_threshold_gov_sys.attr,
        &sampling_down_factor_gov_sys.attr,
        &ignore_nice_load_gov_sys.attr,
        &powersave_bias_gov_sys.attr,
        &io_is_busy_gov_sys.attr,
        NULL
};

static struct attribute_group od_attr_group_gov_sys = {
        .attrs = dbs_attributes_gov_sys,
        .name = "ondemand",
};

static struct attribute *dbs_attributes_gov_pol[] = {
        &sampling_rate_min_gov_pol.attr,
        &sampling_rate_gov_pol.attr,
        &up_threshold_gov_pol.attr,
        &sampling_down_factor_gov_pol.attr,
        &ignore_nice_load_gov_pol.attr,
        &powersave_bias_gov_pol.attr,
        &io_is_busy_gov_pol.attr,
        NULL
};

static struct attribute_group od_attr_group_gov_pol = {
        .attrs = dbs_attributes_gov_pol,
        .name = "ondemand",
};

/************************** sysfs end ************************/

static int od_init(struct dbs_data *dbs_data, bool notify)
{
        struct od_dbs_tuners *tuners;
        u64 idle_time;
        int cpu;

        tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
        if (!tuners) {
                pr_err("%s: kzalloc failed\n", __func__);
                return -ENOMEM;
        }

        cpu = get_cpu();
        idle_time = get_cpu_idle_time_us(cpu, NULL);
        put_cpu();
        if (idle_time != -1ULL) {
                /* Idle micro accounting is supported. Use finer thresholds */
                tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
                /*
                 * In nohz/micro accounting case we set the minimum frequency
                 * not depending on HZ, but fixed (very low). The deferred
                 * timer might skip some samples if idle/sleeping as needed.
                 */
                dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
        } else {
                tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;

                /* For correct statistics, we need 10 ticks for each measure */
                dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
                        jiffies_to_usecs(10);
        }

        tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
        tuners->ignore_nice_load = 0;
        tuners->powersave_bias = default_powersave_bias;
        tuners->io_is_busy = should_io_be_busy();

        dbs_data->tuners = tuners;
        return 0;
}
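
/*
 * Example of the tick-based fallback above: MIN_SAMPLING_RATE_RATIO is 2
 * (cpufreq_governor.h), so with HZ=1000 the minimum works out to
 * 2 * jiffies_to_usecs(10) = 20000 us (20 ms), versus the fixed 10 ms
 * MICRO_FREQUENCY_MIN_SAMPLE_RATE when idle micro-accounting is available.
 */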

static void od_exit(struct dbs_data *dbs_data, bool notify)
{
        kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(od_cpu_dbs_info);

static struct od_ops od_ops = {
        .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
        .powersave_bias_target = generic_powersave_bias_target,
        .freq_increase = dbs_freq_increase,
};

static struct dbs_governor od_dbs_gov = {
        .gov = {
                .name = "ondemand",
                .governor = cpufreq_governor_dbs,
                .max_transition_latency = TRANSITION_LATENCY_LIMIT,
                .owner = THIS_MODULE,
        },
        .governor = GOV_ONDEMAND,
        .attr_group_gov_sys = &od_attr_group_gov_sys,
        .attr_group_gov_pol = &od_attr_group_gov_pol,
        .get_cpu_cdbs = get_cpu_cdbs,
        .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
        .gov_dbs_timer = od_dbs_timer,
        .gov_check_cpu = od_check_cpu,
        .gov_ops = &od_ops,
        .init = od_init,
        .exit = od_exit,
};

#define CPU_FREQ_GOV_ONDEMAND	(&od_dbs_gov.gov)

static void od_set_powersave_bias(unsigned int powersave_bias)
{
        struct cpufreq_policy *policy;
        struct dbs_data *dbs_data;
        struct od_dbs_tuners *od_tuners;
        unsigned int cpu;
        cpumask_t done;

        default_powersave_bias = powersave_bias;
        cpumask_clear(&done);

        get_online_cpus();
        for_each_online_cpu(cpu) {
                struct cpu_common_dbs_info *shared;

                if (cpumask_test_cpu(cpu, &done))
                        continue;

                shared = per_cpu(od_cpu_dbs_info, cpu).cdbs.shared;
                if (!shared)
                        continue;

                policy = shared->policy;
                cpumask_or(&done, &done, policy->cpus);

                if (policy->governor != CPU_FREQ_GOV_ONDEMAND)
                        continue;

                dbs_data = policy->governor_data;
                od_tuners = dbs_data->tuners;
                od_tuners->powersave_bias = default_powersave_bias;
        }
        put_online_cpus();
}

void od_register_powersave_bias_handler(unsigned int (*f)
                (struct cpufreq_policy *, unsigned int, unsigned int),
                unsigned int powersave_bias)
{
        od_ops.powersave_bias_target = f;
        od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
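
/*
 * In-tree user of this hook: drivers/cpufreq/amd_freq_sensitivity.c
 * registers its own target function so that frequency selection can use
 * AMD's frequency-sensitivity feedback hardware rather than the table
 * averaging done by generic_powersave_bias_target().
 */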

void od_unregister_powersave_bias_handler(void)
{
        od_ops.powersave_bias_target = generic_powersave_bias_target;
        od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);

static int __init cpufreq_gov_dbs_init(void)
{
        return cpufreq_register_governor(CPU_FREQ_GOV_ONDEMAND);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        cpufreq_unregister_governor(CPU_FREQ_GOV_ONDEMAND);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
        "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
struct cpufreq_governor *cpufreq_default_governor(void)
{
        return CPU_FREQ_GOV_ONDEMAND;
}

fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);
);