/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>

#include "cpufreq_governor.h"
/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
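/*
 * For reference: the *_UP_THRESHOLD values are CPU load percentages (load is
 * computed on a 0..100 scale by dbs_update()), while the sample-rate values
 * are in microseconds, matching the *_us delay fields used below.
 */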
static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);

static struct dbs_governor od_dbs_gov;
static struct od_ops od_ops;

static unsigned int default_powersave_bias;
/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}
/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_delay_us,
 * freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
 */
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int delay_hi_us;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						      policy->cpu);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_delay_us = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
				       relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
				       CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
				       CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_delay_us = 0;
		return freq_lo;
	}
	delay_hi_us = (freq_avg - freq_lo) * dbs_data->sampling_rate;
	delay_hi_us += (freq_hi - freq_lo) / 2;
	delay_hi_us /= freq_hi - freq_lo;
	dbs_info->freq_hi_delay_us = delay_hi_us;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_delay_us = dbs_data->sampling_rate - delay_hi_us;
	return freq_hi;
}
static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}
static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(policy, freq,
						    CPUFREQ_RELATION_H);
	else if (policy->cur == policy->max)
		return;

	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
				CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}
/*
 * Every sampling_rate, we check, if current idle time is less than 20%
 * (default), then we try to increase frequency. Else, we adjust the frequency
 * proportional to load.
 */
static void od_update(struct cpufreq_policy *policy)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
	struct policy_dbs_info *policy_dbs = dbs_info->cdbs.policy_dbs;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			policy_dbs->rate_mult = dbs_data->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next, min_f, max_f;

		min_f = policy->cpuinfo.min_freq;
		max_f = policy->cpuinfo.max_freq;
		freq_next = min_f + load * (max_f - min_f) / 100;

		/* No longer fully busy, reset rate_mult */
		policy_dbs->rate_mult = 1;

		if (od_tuners->powersave_bias)
			freq_next = od_ops.powersave_bias_target(policy,
								 freq_next,
								 CPUFREQ_RELATION_L);

		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
	}
}
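/*
 * Example of the proportional mapping (illustrative numbers only): with
 * cpuinfo.min_freq = 800000 kHz, cpuinfo.max_freq = 2400000 kHz and a
 * measured load of 50%, freq_next = 800000 + 50 * 1600000 / 100 =
 * 1600000 kHz; CPUFREQ_RELATION_C then picks the closest table entry.
 */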
static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
	int sample_type = dbs_info->sample_type;

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	/*
	 * OD_SUB_SAMPLE doesn't make sense if sample_delay_ns is 0, so ignore
	 * it then.
	 */
	if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
		__cpufreq_driver_target(policy, dbs_info->freq_lo,
					CPUFREQ_RELATION_H);
		return dbs_info->freq_lo_delay_us;
	}

	od_update(policy);

	if (dbs_info->freq_lo) {
		/* Setup timer for SUB_SAMPLE */
		dbs_info->sample_type = OD_SUB_SAMPLE;
		return dbs_info->freq_hi_delay_us;
	}

	return dbs_data->sampling_rate * policy_dbs->rate_mult;
}
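/*
 * In other words, when powersave_bias is active each sampling period is
 * split into a NORMAL sample (run at freq_hi for freq_hi_delay_us) followed
 * by a SUB_SAMPLE (drop to freq_lo for freq_lo_delay_us).  The return value
 * is the delay, in microseconds, until the governor's next evaluation.
 */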
/************************** sysfs interface ************************/
static struct dbs_governor od_dbs_gov;
static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
				size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	dbs_data->io_is_busy = !!input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(&od_dbs_gov, dbs_data);

	return count;
}
static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
				  size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	dbs_data->up_threshold = input;
	return count;
}
static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
					  const char *buf, size_t count)
{
	struct policy_dbs_info *policy_dbs;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		/*
		 * Doing this without locking might lead to using different
		 * rate_mult values in od_update() and od_dbs_timer().
		 */
		mutex_lock(&policy_dbs->timer_mutex);
		policy_dbs->rate_mult = 1;
		mutex_unlock(&policy_dbs->timer_mutex);
	}

	return count;
}
static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) { /* nothing to do */
		return count;
	}
	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(&od_dbs_gov, dbs_data);

	return count;
}
static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
				    size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct policy_dbs_info *policy_dbs;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	od_tuners->powersave_bias = input;

	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list)
		ondemand_powersave_bias_init(policy_dbs->policy);

	return count;
}
gov_show_one_common(sampling_rate);
gov_show_one_common(up_threshold);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(ignore_nice_load);
gov_show_one_common(min_sampling_rate);
gov_show_one_common(io_is_busy);
gov_show_one(od, powersave_bias);

gov_attr_rw(sampling_rate);
gov_attr_rw(io_is_busy);
gov_attr_rw(up_threshold);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(powersave_bias);
gov_attr_ro(min_sampling_rate);
static struct attribute *od_attributes[] = {
	&min_sampling_rate.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};

/************************** sysfs end ************************/
static int od_init(struct dbs_data *dbs_data, bool notify)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	dbs_data->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	return 0;
}
static void od_exit(struct dbs_data *dbs_data, bool notify)
{
	kfree(dbs_data->tuners);
}
static void od_start(struct cpufreq_policy *policy)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);

	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	ondemand_powersave_bias_init(policy);
}

define_get_cpu_dbs_routines(od_cpu_dbs_info);
static struct od_ops od_ops = {
	.powersave_bias_target = generic_powersave_bias_target,
};

static struct dbs_governor od_dbs_gov = {
	.gov = {
		.name = "ondemand",
		.governor = cpufreq_governor_dbs,
		.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
		.owner = THIS_MODULE,
	},
	.kobj_type = { .default_attrs = od_attributes },
	.get_cpu_cdbs = get_cpu_cdbs,
	.gov_dbs_timer = od_dbs_timer,
	.init = od_init,
	.exit = od_exit,
	.start = od_start,
};

#define CPU_FREQ_GOV_ONDEMAND	(&od_dbs_gov.gov)
static void od_set_powersave_bias(unsigned int powersave_bias)
{
	struct cpufreq_policy *policy;
	struct dbs_data *dbs_data;
	struct od_dbs_tuners *od_tuners;
	unsigned int cpu;
	cpumask_t done;

	default_powersave_bias = powersave_bias;
	cpumask_clear(&done);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct policy_dbs_info *policy_dbs;

		if (cpumask_test_cpu(cpu, &done))
			continue;

		policy_dbs = per_cpu(od_cpu_dbs_info, cpu).cdbs.policy_dbs;
		if (!policy_dbs)
			continue;

		policy = policy_dbs->policy;
		cpumask_or(&done, &done, policy->cpus);

		if (policy->governor != CPU_FREQ_GOV_ONDEMAND)
			continue;

		dbs_data = policy_dbs->dbs_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	put_online_cpus();
}
void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
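/*
 * The handler above lets another driver substitute its own powersave_bias
 * policy (e.g. drivers/cpufreq/amd_freq_sensitivity.c); unregistering
 * restores generic_powersave_bias_target().
 */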
void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(CPU_FREQ_GOV_ONDEMAND);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(CPU_FREQ_GOV_ONDEMAND);
}
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_ONDEMAND;
}

fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);