/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include "cpufreq_governor.h"

/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD              (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD            (20)
#define DEF_FREQUENCY_STEP                      (5)
#define DEF_SAMPLING_DOWN_FACTOR                (1)
#define MAX_SAMPLING_DOWN_FACTOR                (10)

static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);

static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                   unsigned int event);

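/*
 * If the conservative governor is selected as the build-time default, the
 * cpufreq core needs to reference cpufreq_gov_conservative directly, so the
 * symbol is only made static when it is not the default governor.
 */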
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
        .name                   = "conservative",
        .governor               = cs_cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
        .owner                  = THIS_MODULE,
};

static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
                                           struct cpufreq_policy *policy)
{
        unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;

        /* max freq cannot be less than 100. But who knows... */
        if (unlikely(freq_target == 0))
                freq_target = DEF_FREQUENCY_STEP;

        return freq_target;
}
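
/*
 * For illustration only: with a hypothetical policy->max of 2000000 kHz and
 * the default freq_step of 5, get_freq_target() returns
 * (5 * 2000000) / 100 = 100000 kHz, i.e. each step moves the requested
 * frequency by 5% of the maximum.
 */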

/*
 * Every sampling_rate, we check whether the current load is above
 * up_threshold (80% by default) and, if so, try to increase the frequency.
 * Every sampling_rate * sampling_down_factor, we check whether the current
 * load is below down_threshold (20% by default) and, if so, try to decrease
 * the frequency.
 *
 * Both increases and decreases happen in steps of freq_step (5% of the
 * maximum frequency by default), so the frequency ramps up and down
 * gradually instead of jumping straight to the limits.
 */
static void cs_check_cpu(int cpu, unsigned int load)
{
        struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
        struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
        struct dbs_data *dbs_data = policy->governor_data;
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

        /*
         * break out if we 'cannot' reduce the speed as the user might
         * want freq_step to be zero
         */
        if (cs_tuners->freq_step == 0)
                return;

        /* Check for frequency increase */
        if (load > cs_tuners->up_threshold) {
                dbs_info->down_skip = 0;

                /* if we are already at full speed then break out early */
                if (dbs_info->requested_freq == policy->max)
                        return;

                dbs_info->requested_freq += get_freq_target(cs_tuners, policy);

                if (dbs_info->requested_freq > policy->max)
                        dbs_info->requested_freq = policy->max;

                __cpufreq_driver_target(policy, dbs_info->requested_freq,
                                        CPUFREQ_RELATION_H);
                return;
        }

        /* if sampling_down_factor is active break out early */
        if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
                return;
        dbs_info->down_skip = 0;

        /* Check for frequency decrease */
        if (load < cs_tuners->down_threshold) {
                unsigned int freq_target;
                /*
                 * if we cannot reduce the frequency anymore, break out early
                 */
                if (policy->cur == policy->min)
                        return;

                freq_target = get_freq_target(cs_tuners, policy);
                if (dbs_info->requested_freq > freq_target)
                        dbs_info->requested_freq -= freq_target;
                else
                        dbs_info->requested_freq = policy->min;

                __cpufreq_driver_target(policy, dbs_info->requested_freq,
                                        CPUFREQ_RELATION_L);
                return;
        }
}
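
/*
 * For illustration only: assuming a sampling_rate of 20000 us and a
 * sampling_down_factor of 10 (not the defaults), an increase is considered
 * on every 20 ms sample, while a decrease is only evaluated on every 10th
 * sample (roughly every 200 ms), because down_skip has to reach
 * sampling_down_factor first.
 */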

static unsigned int cs_dbs_timer(struct cpufreq_policy *policy, bool modify_all)
{
        struct dbs_data *dbs_data = policy->governor_data;
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

        if (modify_all)
                dbs_check_cpu(dbs_data, policy->cpu);

        return delay_for_sampling_rate(cs_tuners->sampling_rate);
}
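
/*
 * Note: the value cs_dbs_timer() returns is used by the common governor code
 * as the delay until the next sample; delay_for_sampling_rate() is expected
 * to convert the sampling_rate (given in microseconds) into jiffies.
 */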

static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                                void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cs_cpu_dbs_info_s *dbs_info =
                                        &per_cpu(cs_cpu_dbs_info, freq->cpu);
        struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);

        if (!policy)
                return 0;

        /* policy isn't governed by conservative governor */
        if (policy->governor != &cpufreq_gov_conservative)
                return 0;

        /*
         * we only care if our internally tracked freq moves outside the 'valid'
         * ranges of frequency available to us otherwise we do not change it
         */
        if (dbs_info->requested_freq > policy->max
                        || dbs_info->requested_freq < policy->min)
                dbs_info->requested_freq = freq->new;

        return 0;
}

static struct notifier_block cs_cpufreq_notifier_block = {
        .notifier_call = dbs_cpufreq_notifier,
};

/************************** sysfs interface ************************/
static struct common_dbs_data cs_dbs_cdata;

static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
                const char *buf, size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                return -EINVAL;

        cs_tuners->sampling_down_factor = input;
        return count;
}

static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
        return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
                return -EINVAL;

        cs_tuners->up_threshold = input;
        return count;
}

static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        /* cannot be lower than 11 otherwise freq will not fall */
        if (ret != 1 || input < 11 || input > 100 ||
                        input >= cs_tuners->up_threshold)
                return -EINVAL;

        cs_tuners->down_threshold = input;
        return count;
}

static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
                const char *buf, size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input, j;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        if (input == cs_tuners->ignore_nice_load) /* nothing to do */
                return count;

        cs_tuners->ignore_nice_load = input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct cs_cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(cs_cpu_dbs_info, j);
                dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
                                        &dbs_info->cdbs.prev_cpu_wall, 0);
                if (cs_tuners->ignore_nice_load)
                        dbs_info->cdbs.prev_cpu_nice =
                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
        }
        return count;
}

static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        if (input > 100)
                input = 100;

        /*
         * no need to test here if freq_step is zero as the user might actually
         * want this, they would be crazy though :)
         */
        cs_tuners->freq_step = input;
        return count;
}

show_store_one(cs, sampling_rate);
show_store_one(cs, sampling_down_factor);
show_store_one(cs, up_threshold);
show_store_one(cs, down_threshold);
show_store_one(cs, ignore_nice_load);
show_store_one(cs, freq_step);
declare_show_sampling_rate_min(cs);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(down_threshold);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(freq_step);
gov_sys_pol_attr_ro(sampling_rate_min);

static struct attribute *dbs_attributes_gov_sys[] = {
        &sampling_rate_min_gov_sys.attr,
        &sampling_rate_gov_sys.attr,
        &sampling_down_factor_gov_sys.attr,
        &up_threshold_gov_sys.attr,
        &down_threshold_gov_sys.attr,
        &ignore_nice_load_gov_sys.attr,
        &freq_step_gov_sys.attr,
        NULL
};

static struct attribute_group cs_attr_group_gov_sys = {
        .attrs = dbs_attributes_gov_sys,
        .name = "conservative",
};

static struct attribute *dbs_attributes_gov_pol[] = {
        &sampling_rate_min_gov_pol.attr,
        &sampling_rate_gov_pol.attr,
        &sampling_down_factor_gov_pol.attr,
        &up_threshold_gov_pol.attr,
        &down_threshold_gov_pol.attr,
        &ignore_nice_load_gov_pol.attr,
        &freq_step_gov_pol.attr,
        NULL
};

static struct attribute_group cs_attr_group_gov_pol = {
        .attrs = dbs_attributes_gov_pol,
        .name = "conservative",
};
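
/*
 * Two attribute groups exist because the common governor code exposes these
 * tunables either once system wide or once per policy, depending on whether
 * the cpufreq driver requests per-policy governors (see
 * have_governor_per_policy() in this kernel's governor code).
 */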

/************************** sysfs end ************************/

static int cs_init(struct dbs_data *dbs_data, bool notify)
{
        struct cs_dbs_tuners *tuners;

        tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
        if (!tuners) {
                pr_err("%s: kzalloc failed\n", __func__);
                return -ENOMEM;
        }

        tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
        tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
        tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
        tuners->ignore_nice_load = 0;
        tuners->freq_step = DEF_FREQUENCY_STEP;

        dbs_data->tuners = tuners;
        dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
                jiffies_to_usecs(10);

        if (notify)
                cpufreq_register_notifier(&cs_cpufreq_notifier_block,
                                          CPUFREQ_TRANSITION_NOTIFIER);

        return 0;
}
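
/*
 * For illustration only: assuming MIN_SAMPLING_RATE_RATIO is 2 and HZ is 250
 * (one jiffy = 4 ms), min_sampling_rate works out to 2 * 40000 us = 80000 us,
 * so on such a configuration the governor will not sample more often than
 * every 80 ms.
 */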

static void cs_exit(struct dbs_data *dbs_data, bool notify)
{
        if (notify)
                cpufreq_unregister_notifier(&cs_cpufreq_notifier_block,
                                            CPUFREQ_TRANSITION_NOTIFIER);

        kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(cs_cpu_dbs_info);

static struct common_dbs_data cs_dbs_cdata = {
        .governor = GOV_CONSERVATIVE,
        .attr_group_gov_sys = &cs_attr_group_gov_sys,
        .attr_group_gov_pol = &cs_attr_group_gov_pol,
        .get_cpu_cdbs = get_cpu_cdbs,
        .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
        .gov_dbs_timer = cs_dbs_timer,
        .gov_check_cpu = cs_check_cpu,
        .init = cs_init,
        .exit = cs_exit,
        .mutex = __MUTEX_INITIALIZER(cs_dbs_cdata.mutex),
};

static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                   unsigned int event)
{
        return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
}

static int __init cpufreq_gov_dbs_init(void)
{
        return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_conservative);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
                "Low Latency Frequency Transition capable processors "
                "optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);