/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include "cpufreq_governor.h"

/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
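
/*
 * The *_UP_THRESHOLD limits above are percentages of busy time: the governor
 * jumps to the maximum frequency once measured load exceeds up_threshold and
 * otherwise scales the frequency proportionally to load; see od_check_cpu().
 */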

static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);

static struct od_ops od_ops;

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static struct cpufreq_governor cpufreq_gov_ondemand;
#endif

static unsigned int default_powersave_bias;
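
/*
 * Look up and cache this CPU's frequency table and clear its powersave-bias
 * low-frequency state before the governor starts sampling.
 */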
static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
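/*
 * Worked example (frequencies assumed for illustration): with powersave_bias
 * set to 100 (i.e. 10%), a request for 2000000 kHz is reduced to freq_avg =
 * 1800000 kHz. If the neighbouring table entries are 1600000 and 2000000 kHz,
 * then freq_lo = 1600000 and freq_hi = 2000000, and with jiffies_total = 10
 * the split is jiffies_hi = (1800000 - 1600000) * 10 / (2000000 - 1600000) =
 * 5 and jiffies_lo = 5, so the time-weighted average over the sampling period
 * matches the requested 1800000 kHz.
 */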
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}

static void ondemand_powersave_bias_init(void)
{
	int i;

	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}
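
/*
 * Raise the policy's frequency to @freq, or to the powersave-bias target
 * derived from it when powersave_bias is non-zero. If we are already at the
 * maximum and no bias applies, there is nothing to do.
 */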
static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(policy, freq,
				CPUFREQ_RELATION_H);
	else if (policy->cur == policy->max)
		return;

	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

/*
 * Every sampling_rate we check whether the current idle time is less than
 * 20% (default). If so, we try to increase the frequency; otherwise we
 * adjust the frequency proportionally to load.
 */
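/*
 * For example (figures assumed), on a policy spanning 500000 to 2000000 kHz
 * a load of 40 maps to freq_next = 500000 + 40 * (2000000 - 500000) / 100 =
 * 1100000 kHz, which the driver then rounds to the closest supported step.
 */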
static void od_check_cpu(int cpu, unsigned int load)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > od_tuners->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult =
				od_tuners->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next, min_f, max_f;

		min_f = policy->cpuinfo.min_freq;
		max_f = policy->cpuinfo.max_freq;
		freq_next = min_f + load * (max_f - min_f) / 100;

		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (!od_tuners->powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_C);
			return;
		}

		freq_next = od_ops.powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
	}
}
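
/*
 * Periodic worker: a NORMAL_SAMPLE evaluates load; when powersave_bias has
 * split the period, a SUB_SAMPLE follows to drop to freq_lo for the rest of
 * it. Returns the number of jiffies until the next sample is due.
 */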
static unsigned int od_dbs_timer(struct cpufreq_policy *policy, bool modify_all)
{
	struct dbs_data *dbs_data = policy->governor_data;
	unsigned int cpu = policy->cpu;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
			cpu);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int delay = 0, sample_type = dbs_info->sample_type;

	if (!modify_all)
		goto max_delay;

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	if (sample_type == OD_SUB_SAMPLE) {
		delay = dbs_info->freq_lo_jiffies;
		__cpufreq_driver_target(policy, dbs_info->freq_lo,
					CPUFREQ_RELATION_H);
	} else {
		dbs_check_cpu(dbs_data, cpu);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = OD_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		}
	}

max_delay:
	if (!delay)
		delay = delay_for_sampling_rate(od_tuners->sampling_rate
				* dbs_info->rate_mult);

	return delay;
}

/************************** sysfs interface ************************/
static struct common_dbs_data od_dbs_cdata;

/**
 * update_sampling_rate - update sampling rate if needed, effective immediately.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old, simply updating
 * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is
 * 10 ms because the user wants an immediate reaction from the ondemand
 * governor, the governor might otherwise change the sampling rate up to
 * 1 second too late. Thus, when we are reducing the sampling rate, we need
 * to make the new value effective immediately.
 */
static void update_sampling_rate(struct dbs_data *dbs_data,
		unsigned int new_rate)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cpumask cpumask;
	int cpu;

	od_tuners->sampling_rate = new_rate = max(new_rate,
			dbs_data->min_sampling_rate);

	/*
	 * Lock governor so that governor start/stop can't execute in parallel.
	 */
	mutex_lock(&od_dbs_cdata.mutex);

	cpumask_copy(&cpumask, cpu_online_mask);

	for_each_cpu(cpu, &cpumask) {
		struct cpufreq_policy *policy;
		struct od_cpu_dbs_info_s *dbs_info;
		struct cpu_dbs_info *cdbs;
		struct cpu_common_dbs_info *shared;
		unsigned long next_sampling, appointed_at;

		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
		cdbs = &dbs_info->cdbs;
		shared = cdbs->shared;

		/*
		 * A valid shared and shared->policy means governor hasn't
		 * stopped or exited yet.
		 */
		if (!shared || !shared->policy)
			continue;

		policy = shared->policy;

		/* clear all CPUs of this policy */
		cpumask_andnot(&cpumask, &cpumask, policy->cpus);

		/*
		 * Update sampling rate for CPUs whose policy is governed by
		 * dbs_data. In case of governor_per_policy, only a single
		 * policy will be governed by dbs_data, otherwise there can be
		 * multiple policies that are governed by the same dbs_data.
		 */
		if (dbs_data != policy->governor_data)
			continue;

		/*
		 * Checking this for any CPU should be fine, timers for all of
		 * them are scheduled together.
		 */
		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->cdbs.timer.expires;

		if (time_before(next_sampling, appointed_at)) {
			gov_cancel_work(shared);
			gov_add_timers(policy, usecs_to_jiffies(new_rate));
		}
	}

	mutex_unlock(&od_dbs_cdata.mutex);
}
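
/*
 * The store_*() handlers below back the governor's sysfs tunables. Each one
 * parses a single unsigned integer, validates it against the limits defined
 * at the top of this file, and propagates the new value to per-CPU state
 * where necessary.
 */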
static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_sampling_rate(dbs_data, input);

	return count;
}

static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	od_tuners->io_is_busy = !!input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
									j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
	}
	return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	od_tuners->up_threshold = input;
	return count;
}

static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input, j;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	od_tuners->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
				j);
		dbs_info->rate_mult = 1;
	}
	return count;
}

static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == od_tuners->ignore_nice_load) { /* nothing to do */
		return count;
	}
	od_tuners->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info;

		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
		if (od_tuners->ignore_nice_load)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}

static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	od_tuners->powersave_bias = input;
	ondemand_powersave_bias_init();
	return count;
}
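
/*
 * The show_store_one() and gov_sys_pol_attr_*() macros (from
 * cpufreq_governor.h) generate the matching show()/store() wrappers and
 * expose each tunable both system-wide (gov_sys) and per policy (gov_pol).
 */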
show_store_one(od, sampling_rate);
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
show_store_one(od, ignore_nice_load);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);

static struct attribute *dbs_attributes_gov_sys[] = {
	&sampling_rate_min_gov_sys.attr,
	&sampling_rate_gov_sys.attr,
	&up_threshold_gov_sys.attr,
	&sampling_down_factor_gov_sys.attr,
	&ignore_nice_load_gov_sys.attr,
	&powersave_bias_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL
};

static struct attribute_group od_attr_group_gov_sys = {
	.attrs = dbs_attributes_gov_sys,
	.name = "ondemand",
};

static struct attribute *dbs_attributes_gov_pol[] = {
	&sampling_rate_min_gov_pol.attr,
	&sampling_rate_gov_pol.attr,
	&up_threshold_gov_pol.attr,
	&sampling_down_factor_gov_pol.attr,
	&ignore_nice_load_gov_pol.attr,
	&powersave_bias_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL
};

static struct attribute_group od_attr_group_gov_pol = {
	.attrs = dbs_attributes_gov_pol,
	.name = "ondemand",
};

/************************** sysfs end ************************/
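
/*
 * Allocate and initialize this governor instance's tuners. When idle
 * micro-accounting is available, tighter defaults can be used because idle
 * time is then measured precisely rather than per tick.
 */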
static int od_init(struct dbs_data *dbs_data, bool notify)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	tuners->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	return 0;
}

static void od_exit(struct dbs_data *dbs_data, bool notify)
{
	kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(od_cpu_dbs_info);
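
/*
 * Default powersave-bias operations; a platform driver may override
 * powersave_bias_target via od_register_powersave_bias_handler() below.
 */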
static struct od_ops od_ops = {
	.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
	.powersave_bias_target = generic_powersave_bias_target,
	.freq_increase = dbs_freq_increase,
};

static struct common_dbs_data od_dbs_cdata = {
	.governor = GOV_ONDEMAND,
	.attr_group_gov_sys = &od_attr_group_gov_sys,
	.attr_group_gov_pol = &od_attr_group_gov_pol,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = od_dbs_timer,
	.gov_check_cpu = od_check_cpu,
	.gov_ops = &od_ops,
	.init = od_init,
	.exit = od_exit,
	.mutex = __MUTEX_INITIALIZER(od_dbs_cdata.mutex),
};
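
/*
 * Apply a new default powersave_bias to every policy currently run by the
 * ondemand governor, visiting each policy only once via the 'done' mask.
 */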
static void od_set_powersave_bias(unsigned int powersave_bias)
{
	struct cpufreq_policy *policy;
	struct dbs_data *dbs_data;
	struct od_dbs_tuners *od_tuners;
	unsigned int cpu;
	cpumask_t done;

	default_powersave_bias = powersave_bias;
	cpumask_clear(&done);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct cpu_common_dbs_info *shared;

		if (cpumask_test_cpu(cpu, &done))
			continue;

		shared = per_cpu(od_cpu_dbs_info, cpu).cdbs.shared;
		if (!shared)
			continue;

		policy = shared->policy;
		cpumask_or(&done, &done, policy->cpus);

		if (policy->governor != &cpufreq_gov_ondemand)
			continue;

		dbs_data = policy->governor_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	put_online_cpus();
}
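
/*
 * Entry points for platform drivers that want to substitute their own
 * powersave-bias target function for the generic one above.
 */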
void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);

void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);

static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
		unsigned int event)
{
	return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= od_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");
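
/*
 * When ondemand is the default governor, register it early via fs_initcall()
 * so it is available by the time cpufreq drivers probe; otherwise plain
 * module_init() ordering is sufficient.
 */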
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);