/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
14 #include <linux/slab.h>
15 #include "cpufreq_governor.h"
17 struct cs_policy_dbs_info
{
18 struct policy_dbs_info policy_dbs
;
19 unsigned int down_skip
;
20 unsigned int requested_freq
;
23 static inline struct cs_policy_dbs_info
*to_dbs_info(struct policy_dbs_info
*policy_dbs
)
25 return container_of(policy_dbs
, struct cs_policy_dbs_info
, policy_dbs
);
28 /* Conservative governor macros */
29 #define DEF_FREQUENCY_UP_THRESHOLD (80)
30 #define DEF_FREQUENCY_DOWN_THRESHOLD (20)
31 #define DEF_FREQUENCY_STEP (5)
32 #define DEF_SAMPLING_DOWN_FACTOR (1)
33 #define MAX_SAMPLING_DOWN_FACTOR (10)
35 static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s
, cs_cpu_dbs_info
);
37 static struct dbs_governor cs_dbs_gov
;
39 static inline unsigned int get_freq_target(struct cs_dbs_tuners
*cs_tuners
,
40 struct cpufreq_policy
*policy
)
42 unsigned int freq_target
= (cs_tuners
->freq_step
* policy
->max
) / 100;
44 /* max freq cannot be less than 100. But who knows... */
45 if (unlikely(freq_target
== 0))
46 freq_target
= DEF_FREQUENCY_STEP
;
52 * Every sampling_rate, we check, if current idle time is less than 20%
53 * (default), then we try to increase frequency. Every sampling_rate *
54 * sampling_down_factor, we check, if current idle time is more than 80%
55 * (default), then we try to decrease frequency
57 * Any frequency increase takes it to the maximum frequency. Frequency reduction
58 * happens at minimum steps of 5% (default) of maximum frequency
60 static unsigned int cs_dbs_timer(struct cpufreq_policy
*policy
)
62 struct policy_dbs_info
*policy_dbs
= policy
->governor_data
;
63 struct cs_policy_dbs_info
*dbs_info
= to_dbs_info(policy_dbs
);
64 struct dbs_data
*dbs_data
= policy_dbs
->dbs_data
;
65 struct cs_dbs_tuners
*cs_tuners
= dbs_data
->tuners
;
66 unsigned int load
= dbs_update(policy
);
69 * break out if we 'cannot' reduce the speed as the user might
70 * want freq_step to be zero
72 if (cs_tuners
->freq_step
== 0)
75 /* Check for frequency increase */
76 if (load
> dbs_data
->up_threshold
) {
77 dbs_info
->down_skip
= 0;
79 /* if we are already at full speed then break out early */
80 if (dbs_info
->requested_freq
== policy
->max
)
83 dbs_info
->requested_freq
+= get_freq_target(cs_tuners
, policy
);
85 if (dbs_info
->requested_freq
> policy
->max
)
86 dbs_info
->requested_freq
= policy
->max
;
88 __cpufreq_driver_target(policy
, dbs_info
->requested_freq
,
93 /* if sampling_down_factor is active break out early */
94 if (++dbs_info
->down_skip
< dbs_data
->sampling_down_factor
)
96 dbs_info
->down_skip
= 0;
98 /* Check for frequency decrease */
99 if (load
< cs_tuners
->down_threshold
) {
100 unsigned int freq_target
;
102 * if we cannot reduce the frequency anymore, break out early
104 if (policy
->cur
== policy
->min
)
107 freq_target
= get_freq_target(cs_tuners
, policy
);
108 if (dbs_info
->requested_freq
> freq_target
)
109 dbs_info
->requested_freq
-= freq_target
;
111 dbs_info
->requested_freq
= policy
->min
;
113 __cpufreq_driver_target(policy
, dbs_info
->requested_freq
,
118 return dbs_data
->sampling_rate
;
121 static int dbs_cpufreq_notifier(struct notifier_block
*nb
, unsigned long val
,
124 static struct notifier_block cs_cpufreq_notifier_block
= {
125 .notifier_call
= dbs_cpufreq_notifier
,
128 /************************** sysfs interface ************************/
129 static struct dbs_governor cs_dbs_gov
;
131 static ssize_t
store_sampling_down_factor(struct dbs_data
*dbs_data
,
132 const char *buf
, size_t count
)
136 ret
= sscanf(buf
, "%u", &input
);
138 if (ret
!= 1 || input
> MAX_SAMPLING_DOWN_FACTOR
|| input
< 1)
141 dbs_data
->sampling_down_factor
= input
;
145 static ssize_t
store_up_threshold(struct dbs_data
*dbs_data
, const char *buf
,
148 struct cs_dbs_tuners
*cs_tuners
= dbs_data
->tuners
;
151 ret
= sscanf(buf
, "%u", &input
);
153 if (ret
!= 1 || input
> 100 || input
<= cs_tuners
->down_threshold
)
156 dbs_data
->up_threshold
= input
;
160 static ssize_t
store_down_threshold(struct dbs_data
*dbs_data
, const char *buf
,
163 struct cs_dbs_tuners
*cs_tuners
= dbs_data
->tuners
;
166 ret
= sscanf(buf
, "%u", &input
);
168 /* cannot be lower than 11 otherwise freq will not fall */
169 if (ret
!= 1 || input
< 11 || input
> 100 ||
170 input
>= dbs_data
->up_threshold
)
173 cs_tuners
->down_threshold
= input
;
177 static ssize_t
store_ignore_nice_load(struct dbs_data
*dbs_data
,
178 const char *buf
, size_t count
)
183 ret
= sscanf(buf
, "%u", &input
);
190 if (input
== dbs_data
->ignore_nice_load
) /* nothing to do */
193 dbs_data
->ignore_nice_load
= input
;
195 /* we need to re-evaluate prev_cpu_idle */
196 gov_update_cpu_data(&cs_dbs_gov
, dbs_data
);
201 static ssize_t
store_freq_step(struct dbs_data
*dbs_data
, const char *buf
,
204 struct cs_dbs_tuners
*cs_tuners
= dbs_data
->tuners
;
207 ret
= sscanf(buf
, "%u", &input
);
216 * no need to test here if freq_step is zero as the user might actually
217 * want this, they would be crazy though :)
219 cs_tuners
->freq_step
= input
;
223 gov_show_one_common(sampling_rate
);
224 gov_show_one_common(sampling_down_factor
);
225 gov_show_one_common(up_threshold
);
226 gov_show_one_common(ignore_nice_load
);
227 gov_show_one_common(min_sampling_rate
);
228 gov_show_one(cs
, down_threshold
);
229 gov_show_one(cs
, freq_step
);
231 gov_attr_rw(sampling_rate
);
232 gov_attr_rw(sampling_down_factor
);
233 gov_attr_rw(up_threshold
);
234 gov_attr_rw(ignore_nice_load
);
235 gov_attr_ro(min_sampling_rate
);
236 gov_attr_rw(down_threshold
);
237 gov_attr_rw(freq_step
);
239 static struct attribute
*cs_attributes
[] = {
240 &min_sampling_rate
.attr
,
242 &sampling_down_factor
.attr
,
244 &down_threshold
.attr
,
245 &ignore_nice_load
.attr
,
250 /************************** sysfs end ************************/
252 static struct policy_dbs_info
*cs_alloc(void)
254 struct cs_policy_dbs_info
*dbs_info
;
256 dbs_info
= kzalloc(sizeof(*dbs_info
), GFP_KERNEL
);
257 return dbs_info
? &dbs_info
->policy_dbs
: NULL
;
/* Counterpart of cs_alloc(): free the containing cs_policy_dbs_info. */
static void cs_free(struct policy_dbs_info *policy_dbs)
{
	kfree(to_dbs_info(policy_dbs));
}
265 static int cs_init(struct dbs_data
*dbs_data
, bool notify
)
267 struct cs_dbs_tuners
*tuners
;
269 tuners
= kzalloc(sizeof(*tuners
), GFP_KERNEL
);
271 pr_err("%s: kzalloc failed\n", __func__
);
275 tuners
->down_threshold
= DEF_FREQUENCY_DOWN_THRESHOLD
;
276 tuners
->freq_step
= DEF_FREQUENCY_STEP
;
277 dbs_data
->up_threshold
= DEF_FREQUENCY_UP_THRESHOLD
;
278 dbs_data
->sampling_down_factor
= DEF_SAMPLING_DOWN_FACTOR
;
279 dbs_data
->ignore_nice_load
= 0;
281 dbs_data
->tuners
= tuners
;
282 dbs_data
->min_sampling_rate
= MIN_SAMPLING_RATE_RATIO
*
283 jiffies_to_usecs(10);
286 cpufreq_register_notifier(&cs_cpufreq_notifier_block
,
287 CPUFREQ_TRANSITION_NOTIFIER
);
292 static void cs_exit(struct dbs_data
*dbs_data
, bool notify
)
295 cpufreq_unregister_notifier(&cs_cpufreq_notifier_block
,
296 CPUFREQ_TRANSITION_NOTIFIER
);
298 kfree(dbs_data
->tuners
);
301 static void cs_start(struct cpufreq_policy
*policy
)
303 struct cs_policy_dbs_info
*dbs_info
= to_dbs_info(policy
->governor_data
);
305 dbs_info
->down_skip
= 0;
306 dbs_info
->requested_freq
= policy
->cur
;
309 define_get_cpu_dbs_routines(cs_cpu_dbs_info
);
311 static struct dbs_governor cs_dbs_gov
= {
313 .name
= "conservative",
314 .governor
= cpufreq_governor_dbs
,
315 .max_transition_latency
= TRANSITION_LATENCY_LIMIT
,
316 .owner
= THIS_MODULE
,
318 .kobj_type
= { .default_attrs
= cs_attributes
},
319 .get_cpu_cdbs
= get_cpu_cdbs
,
320 .gov_dbs_timer
= cs_dbs_timer
,
328 #define CPU_FREQ_GOV_CONSERVATIVE (&cs_dbs_gov.gov)
330 static int dbs_cpufreq_notifier(struct notifier_block
*nb
, unsigned long val
,
333 struct cpufreq_freqs
*freq
= data
;
334 struct cpufreq_policy
*policy
= cpufreq_cpu_get_raw(freq
->cpu
);
335 struct cs_policy_dbs_info
*dbs_info
;
340 /* policy isn't governed by conservative governor */
341 if (policy
->governor
!= CPU_FREQ_GOV_CONSERVATIVE
)
344 dbs_info
= to_dbs_info(policy
->governor_data
);
346 * we only care if our internally tracked freq moves outside the 'valid'
347 * ranges of frequency available to us otherwise we do not change it
349 if (dbs_info
->requested_freq
> policy
->max
350 || dbs_info
->requested_freq
< policy
->min
)
351 dbs_info
->requested_freq
= freq
->new;
356 static int __init
cpufreq_gov_dbs_init(void)
358 return cpufreq_register_governor(CPU_FREQ_GOV_CONSERVATIVE
);
361 static void __exit
cpufreq_gov_dbs_exit(void)
363 cpufreq_unregister_governor(CPU_FREQ_GOV_CONSERVATIVE
);
366 MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
367 MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
368 "Low Latency Frequency Transition capable processors "
369 "optimised for use in a battery environment");
370 MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
/* When built as the default governor, expose it to the cpufreq core. */
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_CONSERVATIVE;
}

/* Register early so the default governor is available at core init. */
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);