/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include "cpufreq_governor.h"
/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
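
/*
 * Units, for reference: the *_UP_THRESHOLD values are CPU load
 * percentages, the sampling rates are in microseconds (so
 * MICRO_FREQUENCY_MIN_SAMPLE_RATE is 10 ms), and powersave_bias is
 * expressed in tenths of a percent (0..1000).
 */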

static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);

static struct od_ops od_ops;

static struct cpufreq_governor cpufreq_gov_ondemand;

static unsigned int default_powersave_bias;

static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}
/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}
/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}
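
/*
 * Illustrative example of the split above (hypothetical numbers, not from
 * the original source): with freq_req = 1000 MHz and powersave_bias = 100
 * (i.e. 10%), freq_avg = 900 MHz. If the frequency table brackets that
 * with freq_lo = 800 MHz and freq_hi = 1000 MHz, then
 * jiffies_hi = (900 - 800) / (1000 - 800) = half of the sampling period
 * (after rounding), so alternating between the two frequencies averages
 * out to roughly the requested 900 MHz.
 */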

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(policy, freq,
				CPUFREQ_RELATION_H);
	else if (policy->cur == policy->max)
		return;

	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}
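
/*
 * Note the relation choice above: with powersave_bias set, freq was
 * already rounded to a table entry by powersave_bias_target(), so
 * CPUFREQ_RELATION_L (lowest frequency at or above target) hits it
 * exactly; otherwise freq is policy->max and CPUFREQ_RELATION_H (highest
 * frequency at or below target) avoids overshooting it.
 */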

/*
 * Every sampling_rate, we check, if current idle time is less than 20%
 * (default), then we try to increase frequency. Else, we adjust the frequency
 * proportional to load.
 */
static void od_check_cpu(int cpu, unsigned int load)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > od_tuners->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult =
				od_tuners->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next, min_f, max_f;

		min_f = policy->cpuinfo.min_freq;
		max_f = policy->cpuinfo.max_freq;
		freq_next = min_f + load * (max_f - min_f) / 100;

		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (!od_tuners->powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_C);
			return;
		}

		freq_next = od_ops.powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
	}
}
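
/*
 * Illustrative example of the proportional mapping above (hypothetical
 * numbers): with cpuinfo.min_freq = 800 MHz, cpuinfo.max_freq = 2000 MHz
 * and load = 50, freq_next = 800 + 50 * (2000 - 800) / 100 = 1400 MHz;
 * CPUFREQ_RELATION_C then picks the closest entry in the frequency table.
 */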

static unsigned int od_dbs_timer(struct cpufreq_policy *policy, bool modify_all)
{
	struct dbs_data *dbs_data = policy->governor_data;
	unsigned int cpu = policy->cpu;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
			cpu);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int delay = 0, sample_type = dbs_info->sample_type;

	if (!modify_all)
		goto max_delay;

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	if (sample_type == OD_SUB_SAMPLE) {
		delay = dbs_info->freq_lo_jiffies;
		__cpufreq_driver_target(policy, dbs_info->freq_lo,
					CPUFREQ_RELATION_H);
	} else {
		dbs_check_cpu(dbs_data, cpu);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = OD_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		}
	}

max_delay:
	if (!delay)
		delay = delay_for_sampling_rate(od_tuners->sampling_rate
				* dbs_info->rate_mult);

	return delay;
}
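
/*
 * Two details of the timer above are worth spelling out: when
 * powersave_bias is active, a NORMAL sample can arm a SUB_SAMPLE phase so
 * that one sampling period is split between freq_hi and freq_lo; and
 * rate_mult, which od_check_cpu() sets to sampling_down_factor while the
 * CPU runs at policy->max, stretches the next delay so a fully loaded CPU
 * is re-evaluated less often before it is allowed to step down.
 */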

/************************** sysfs interface ************************/
static struct common_dbs_data od_dbs_cdata;

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If new rate is smaller than the old, simply updating
 * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is 10
 * ms because the user needs immediate reaction from ondemand governor, but not
 * sure if higher frequency will be required or not, then, the governor may
 * change the sampling rate too late; up to 1 second later. Thus, if we are
 * reducing the sampling rate, we need to make the new value effective
 * immediately.
 */
static void update_sampling_rate(struct dbs_data *dbs_data,
		unsigned int new_rate)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cpumask cpumask;
	int cpu;

	od_tuners->sampling_rate = new_rate = max(new_rate,
			dbs_data->min_sampling_rate);

	/*
	 * Lock governor so that governor start/stop can't execute in parallel.
	 */
	mutex_lock(&od_dbs_cdata.mutex);

	cpumask_copy(&cpumask, cpu_online_mask);

	for_each_cpu(cpu, &cpumask) {
		struct cpufreq_policy *policy;
		struct od_cpu_dbs_info_s *dbs_info;
		struct cpu_dbs_info *cdbs;
		struct cpu_common_dbs_info *shared;
		unsigned long next_sampling, appointed_at;

		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
		cdbs = &dbs_info->cdbs;
		shared = cdbs->shared;

		/*
		 * A valid shared and shared->policy means governor hasn't
		 * stopped or exited yet.
		 */
		if (!shared || !shared->policy)
			continue;

		policy = shared->policy;

		/* clear all CPUs of this policy */
		cpumask_andnot(&cpumask, &cpumask, policy->cpus);

		/*
		 * Update sampling rate for CPUs whose policy is governed by
		 * dbs_data. In case of governor_per_policy, only a single
		 * policy will be governed by dbs_data, otherwise there can be
		 * multiple policies that are governed by the same dbs_data.
		 */
		if (dbs_data != policy->governor_data)
			continue;

		/*
		 * Checking this for any CPU should be fine, timers for all of
		 * them are scheduled together.
		 */
		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->cdbs.timer.expires;

		if (time_before(next_sampling, appointed_at)) {
			gov_cancel_work(shared);
			gov_add_timers(policy, usecs_to_jiffies(new_rate));
		}
	}

	mutex_unlock(&od_dbs_cdata.mutex);
}
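
/*
 * The new rate typically arrives from user space via sysfs; assuming the
 * governor is not built with per-policy tunables, something like:
 *
 *	echo 10000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
 */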

static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_sampling_rate(dbs_data, input);
	return count;
}

static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	od_tuners->io_is_busy = !!input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
									j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
	}
	return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	od_tuners->up_threshold = input;
	return count;
}

static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input, j;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	od_tuners->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
				j);
		dbs_info->rate_mult = 1;
	}
	return count;
}

static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == od_tuners->ignore_nice_load) { /* nothing to do */
		return count;
	}
	od_tuners->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
		if (od_tuners->ignore_nice_load)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}

static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	od_tuners->powersave_bias = input;
	ondemand_powersave_bias_init();
	return count;
}

show_store_one(od, sampling_rate);
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
show_store_one(od, ignore_nice_load);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);
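
/*
 * These macros (from cpufreq_governor.h) expand to the show()/store()
 * callbacks and attribute definitions in two flavours: *_gov_sys for the
 * global /sys/devices/system/cpu/cpufreq/ondemand/ directory and
 * *_gov_pol for the per-policy directories, matching the two attribute
 * tables below.
 */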

static struct attribute *dbs_attributes_gov_sys[] = {
	&sampling_rate_min_gov_sys.attr,
	&sampling_rate_gov_sys.attr,
	&up_threshold_gov_sys.attr,
	&sampling_down_factor_gov_sys.attr,
	&ignore_nice_load_gov_sys.attr,
	&powersave_bias_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL
};

static struct attribute_group od_attr_group_gov_sys = {
	.attrs = dbs_attributes_gov_sys,
	.name = "ondemand",
};

static struct attribute *dbs_attributes_gov_pol[] = {
	&sampling_rate_min_gov_pol.attr,
	&sampling_rate_gov_pol.attr,
	&up_threshold_gov_pol.attr,
	&sampling_down_factor_gov_pol.attr,
	&ignore_nice_load_gov_pol.attr,
	&powersave_bias_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL
};

static struct attribute_group od_attr_group_gov_pol = {
	.attrs = dbs_attributes_gov_pol,
	.name = "ondemand",
};

/************************** sysfs end ************************/

static int od_init(struct dbs_data *dbs_data, bool notify)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	tuners->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	return 0;
}

static void od_exit(struct dbs_data *dbs_data, bool notify)
{
	kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(od_cpu_dbs_info);

static struct od_ops od_ops = {
	.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
	.powersave_bias_target = generic_powersave_bias_target,
	.freq_increase = dbs_freq_increase,
};

static struct common_dbs_data od_dbs_cdata = {
	.governor = GOV_ONDEMAND,
	.attr_group_gov_sys = &od_attr_group_gov_sys,
	.attr_group_gov_pol = &od_attr_group_gov_pol,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = od_dbs_timer,
	.gov_check_cpu = od_check_cpu,
	.gov_ops = &od_ops,
	.init = od_init,
	.exit = od_exit,
	.mutex = __MUTEX_INITIALIZER(od_dbs_cdata.mutex),
};

static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
		unsigned int event)
{
	return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
}

static struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= od_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static void od_set_powersave_bias(unsigned int powersave_bias)
{
	struct cpufreq_policy *policy;
	struct dbs_data *dbs_data;
	struct od_dbs_tuners *od_tuners;
	unsigned int cpu;
	cpumask_t done;

	default_powersave_bias = powersave_bias;
	cpumask_clear(&done);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct cpu_common_dbs_info *shared;

		if (cpumask_test_cpu(cpu, &done))
			continue;

		shared = per_cpu(od_cpu_dbs_info, cpu).cdbs.shared;
		if (!shared)
			continue;

		policy = shared->policy;
		cpumask_or(&done, &done, policy->cpus);

		if (policy->governor != &cpufreq_gov_ondemand)
			continue;

		dbs_data = policy->governor_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	put_online_cpus();
}

void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
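
/*
 * Platform drivers can substitute their own bias policy here; for
 * example, amd_freq_sensitivity.c registers an alternative
 * powersave_bias_target() implementation through this hook.
 */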

void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &cpufreq_gov_ondemand;
}

fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);
);