/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *          Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/tick.h>
#include <linux/types.h>

#include "cpufreq_governor.h"

/* On-demand governor macros */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static struct cpufreq_governor cpufreq_gov_ondemand;
#endif

static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						      policy->cpu);
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
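	/*
	 * Split the sampling window between freq_hi and freq_lo so that the
	 * time-weighted average frequency equals freq_avg; jiffies_hi is a
	 * linear interpolation, rounded to the nearest jiffy:
	 *   jiffies_hi = jiffies_total * (freq_avg - freq_lo) / (freq_hi - freq_lo)
	 */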
	jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

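/*
 * dbs_freq_increase - jump straight to the requested frequency. With
 * powersave_bias set, the target is first reduced via
 * powersave_bias_target(); otherwise the request is passed through
 * unchanged, and nothing is done if the policy is already at its maximum.
 */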
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	struct dbs_data *dbs_data = p->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	__cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

/*
 * Every sampling_rate, we check if the current idle time is less than 20%
 * (default). If it is, we try to increase the frequency. Every sampling_rate,
 * we also look for the lowest frequency which can sustain the load while
 * keeping idle time over 30%. If such a frequency exists, we try to decrease
 * to it.
 *
 * Any frequency increase takes it to the maximum frequency. Frequency
 * reduction happens at minimum steps of 5% (default) of the current frequency.
 */
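/*
 * Worked example with the default (non-micro) tunables, where
 * adj_up_threshold = 80 - 10 = 70: at 50% load the decrease path picks
 * freq_next = 50 * policy->cur / 70, i.e. the lowest frequency at which
 * the current work would still leave roughly 30% idle headroom.
 */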
static void od_check_cpu(int cpu, unsigned int load_freq)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load_freq > od_tuners->up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult =
				od_tuners->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the frequency that is the lowest that can
	 * support the current CPU usage without triggering the up policy. To
	 * be safe, we focus 10 points under the threshold.
	 */
	if (load_freq < od_tuners->adj_up_threshold
			* policy->cur) {
		unsigned int freq_next;
		freq_next = load_freq / od_tuners->adj_up_threshold;

		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!od_tuners->powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
					CPUFREQ_RELATION_L);
		}
	}
}

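/*
 * Timer callback: normally it just re-evaluates the load, but when
 * powersave_bias is active it alternates between two sample types: a
 * NORMAL sample runs at freq_hi for freq_hi_jiffies, then a SUB sample
 * drops to freq_lo for freq_lo_jiffies, so that the average frequency
 * falls between two entries of the frequency table.
 */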
static void od_dbs_timer(struct work_struct *work)
{
	struct od_cpu_dbs_info_s *dbs_info =
		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
			cpu);
	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int delay = 0, sample_type = core_dbs_info->sample_type;
	bool modify_all = true;

	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
	if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
		modify_all = false;
		goto max_delay;
	}

	/* Common NORMAL_SAMPLE setup */
	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
	if (sample_type == OD_SUB_SAMPLE) {
		delay = core_dbs_info->freq_lo_jiffies;
		__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
				core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
	} else {
		dbs_check_cpu(dbs_data, cpu);
		if (core_dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			core_dbs_info->sample_type = OD_SUB_SAMPLE;
			delay = core_dbs_info->freq_hi_jiffies;
		}
	}

max_delay:
	if (!delay)
		delay = delay_for_sampling_rate(od_tuners->sampling_rate
				* core_dbs_info->rate_mult);

	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}

/************************** sysfs interface ************************/
static struct common_dbs_data od_dbs_cdata;

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old, simply updating
 * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is
 * 10 ms because the user needs an immediate reaction from the ondemand
 * governor, but is not sure whether a higher frequency will be required, then
 * the governor may change the sampling rate too late; up to 1 second later.
 * Thus, if we are reducing the sampling rate, we need to make the new value
 * effective immediately.
 */
static void update_sampling_rate(struct dbs_data *dbs_data,
		unsigned int new_rate)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int cpu;

	od_tuners->sampling_rate = new_rate = max(new_rate,
			dbs_data->min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct od_cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		if (policy->governor != &cpufreq_gov_ondemand) {
			cpufreq_cpu_put(policy);
			continue;
		}
		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->cdbs.timer_mutex);

		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->cdbs.work.timer.expires;

		if (time_before(next_sampling, appointed_at)) {

			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			cancel_delayed_work_sync(&dbs_info->cdbs.work);
			mutex_lock(&dbs_info->cdbs.timer_mutex);

			gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
					usecs_to_jiffies(new_rate), true);

		}
		mutex_unlock(&dbs_info->cdbs.timer_mutex);
	}
}

static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_sampling_rate(dbs_data, input);
	return count;
}

static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	od_tuners->io_is_busy = !!input;
	return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	/*
	 * Calculate the new adj_up_threshold: shift it by the same amount
	 * as up_threshold so that the down differential between the two
	 * stays constant.
	 */
	od_tuners->adj_up_threshold += input;
	od_tuners->adj_up_threshold -= od_tuners->up_threshold;

	od_tuners->up_threshold = input;
	return count;
}

static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input, j;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	od_tuners->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
				j);
		dbs_info->rate_mult = 1;
	}
	return count;
}

static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == od_tuners->ignore_nice) { /* nothing to do */
		return count;
	}
	od_tuners->ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
				&dbs_info->cdbs.prev_cpu_wall);
		if (od_tuners->ignore_nice)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}

static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	od_tuners->powersave_bias = input;
	ondemand_powersave_bias_init();
	return count;
}

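/*
 * show_store_one() and gov_sys_pol_attr_rw() (from cpufreq_governor.h)
 * generate the show/store callbacks and sysfs attributes for both the
 * global (gov_sys) and per-policy (gov_pol) interfaces.
 */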
show_store_one(od, sampling_rate);
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
show_store_one(od, ignore_nice);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(ignore_nice);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);

static struct attribute *dbs_attributes_gov_sys[] = {
	&sampling_rate_min_gov_sys.attr,
	&sampling_rate_gov_sys.attr,
	&up_threshold_gov_sys.attr,
	&sampling_down_factor_gov_sys.attr,
	&ignore_nice_gov_sys.attr,
	&powersave_bias_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL
};

static struct attribute_group od_attr_group_gov_sys = {
	.attrs = dbs_attributes_gov_sys,
	.name = "ondemand",
};

static struct attribute *dbs_attributes_gov_pol[] = {
	&sampling_rate_min_gov_pol.attr,
	&sampling_rate_gov_pol.attr,
	&up_threshold_gov_pol.attr,
	&sampling_down_factor_gov_pol.attr,
	&ignore_nice_gov_pol.attr,
	&powersave_bias_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL
};

static struct attribute_group od_attr_group_gov_pol = {
	.attrs = dbs_attributes_gov_pol,
	.name = "ondemand",
};

/************************** sysfs end ************************/

static int od_init(struct dbs_data *dbs_data)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
			MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
		tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
			DEF_FREQUENCY_DOWN_DIFFERENTIAL;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice = 0;
	tuners->powersave_bias = 0;
	tuners->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	pr_info("%s: tuners %p\n", __func__, tuners);
	mutex_init(&dbs_data->mutex);
	return 0;
}

static void od_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(od_cpu_dbs_info);

static struct od_ops od_ops = {
	.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
	.powersave_bias_target = powersave_bias_target,
	.freq_increase = dbs_freq_increase,
};

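/*
 * Glue for the common governor core: od_dbs_cdata wires the ondemand
 * callbacks (timer, load check, tuner init/exit and the sysfs attribute
 * groups) into the shared cpufreq_governor framework.
 */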
static struct common_dbs_data od_dbs_cdata = {
	.governor = GOV_ONDEMAND,
	.attr_group_gov_sys = &od_attr_group_gov_sys,
	.attr_group_gov_pol = &od_attr_group_gov_pol,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = od_dbs_timer,
	.gov_check_cpu = od_check_cpu,
	.gov_ops = &od_ops,
	.init = od_init,
	.exit = od_exit,
};

static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
		unsigned int event)
{
	return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= od_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);