/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>
/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in us.
 */
#define MIN_SAMPLING_RATE_RATIO			(2)

static unsigned int min_sampling_rate;
#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
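/*
 * Worked example (illustrative figures): a driver-reported transition
 * latency of 10,000 ns (10 us) yields a default sampling rate of
 * 10 * LATENCY_MULTIPLIER = 10,000 us, so the governor re-evaluates load
 * every 10 ms, and may never sample faster than
 * MIN_LATENCY_MULTIPLIER * 10 us = 1,000 us.
 */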
static void do_dbs_timer(struct work_struct *work);
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event);
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};
/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
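/*
 * With powersave_bias enabled, a NORMAL_SAMPLE evaluates load and may pick
 * a high/low frequency pair (see powersave_bias_target); the follow-up
 * SUB_SAMPLE then switches to the low frequency until the next normal
 * sample, so the time-averaged frequency approximates the biased target.
 */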
struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_iowait;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	int cpu;
	unsigned int sample_type:1;
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when user is changing the governor or limits.
	 */
	struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
static unsigned int dbs_enable;	/* number of CPUs using this policy */
/*
 * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
 * different CPUs. It protects dbs_enable in governor start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);
static struct workqueue_struct	*kondemand_wq;
static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int ignore_nice;
	unsigned int powersave_bias;
	unsigned int io_is_busy;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
};
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
							cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);

	return (cputime64_t)jiffies_to_usecs(idle_time);
}
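/*
 * Note: the jiffy-based path above derives idle time as wall time minus the
 * sum of the busy cpustat buckets; it only serves as a fallback when the
 * fine-grained tickless idle accounting used below is unavailable.
 */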
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);

	return idle_time;
}
static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
{
	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

	if (iowait_time == -1ULL)
		return 0;

	return iowait_time;
}
/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}
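/*
 * Worked example (illustrative figures): with powersave_bias = 100 (i.e.
 * 10%, in units of 0.1%) and a requested 2,000,000 kHz (2 GHz), freq_avg
 * becomes 1,800,000 kHz. If the table only offers 1,600,000 and 2,000,000,
 * then jiffies_hi/jiffies_total = (1800-1600)/(2000-1600) = 1/2, so the
 * governor alternates between the two steps, averaging 1.8 GHz.
 */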
static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}
static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
	       "sysfs file is deprecated - used by: %s\n", current->comm);
	return sprintf(buf, "%u\n", -1U);
}

static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", min_sampling_rate);
}

#define define_one_ro(_name)		\
static struct global_attr _name =	\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
/*** delete after deprecation time ***/

#define DEPRECATION_MSG(file_name)					\
	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
		    "interface is deprecated - " #file_name "\n");

#define show_one_old(file_name)						\
static ssize_t show_##file_name##_old					\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
		    "interface is deprecated - " #file_name "\n");	\
	return show_##file_name(NULL, NULL, buf);			\
}
show_one_old(sampling_rate);
show_one_old(up_threshold);
show_one_old(ignore_nice_load);
show_one_old(powersave_bias);
show_one_old(sampling_rate_min);
show_one_old(sampling_rate_max);

#define define_one_ro_old(object, _name)	\
static struct freq_attr object =		\
__ATTR(_name, 0444, show_##_name##_old, NULL)

define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
/*** delete after deprecation time ***/
static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
	mutex_unlock(&dbs_mutex);

	return count;
}
static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.io_is_busy = !!input;
	mutex_unlock(&dbs_mutex);

	return count;
}
static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}
static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;

	}
	mutex_unlock(&dbs_mutex);

	return count;
}
static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.powersave_bias = input;
	ondemand_powersave_bias_init();
	mutex_unlock(&dbs_mutex);

	return count;
}
#define define_one_rw(_name) \
static struct global_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(io_is_busy);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(powersave_bias);
static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};
/*** delete after deprecation time ***/

#define write_one_old(file_name)					\
static ssize_t store_##file_name##_old					\
(struct cpufreq_policy *unused, const char *buf, size_t count)		\
{									\
	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
		    "interface is deprecated - " #file_name "\n");	\
	return store_##file_name(NULL, NULL, buf, count);		\
}
write_one_old(sampling_rate);
write_one_old(up_threshold);
write_one_old(ignore_nice_load);
write_one_old(powersave_bias);

#define define_one_rw_old(object, _name)	\
static struct freq_attr object =		\
__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)

define_one_rw_old(sampling_rate_old, sampling_rate);
define_one_rw_old(up_threshold_old, up_threshold);
define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
define_one_rw_old(powersave_bias_old, powersave_bias);
static struct attribute *dbs_attributes_old[] = {
	&sampling_rate_max_old.attr,
	&sampling_rate_min_old.attr,
	&sampling_rate_old.attr,
	&up_threshold_old.attr,
	&ignore_nice_load_old.attr,
	&powersave_bias_old.attr,
	NULL
};

static struct attribute_group dbs_attr_group_old = {
	.attrs = dbs_attributes_old,
	.name = "ondemand",
};
/*** delete after deprecation time ***/

/************************** sysfs end ************************/
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int max_load_freq;

	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate, we check, if current idle time is less
	 * than 20% (default), then we try to increase frequency.
	 * Every sampling_rate, we look for the lowest
	 * frequency which can sustain the load while keeping idle time over
	 * 30%. If such a frequency exists, we try to decrease to this
	 * frequency.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of current frequency.
	 */
	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
		unsigned int idle_time, wall_time, iowait_time;
		unsigned int load, load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
				j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
				j_dbs_info->prev_cpu_iowait);
		j_dbs_info->prev_cpu_iowait = cur_iowait_time;

		if (dbs_tuners_ins.ignore_nice) {
			cputime64_t cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
					 j_dbs_info->prev_cpu_nice);
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		/*
		 * For the purpose of ondemand, waiting for disk IO is an
		 * indication that you're performance critical, and not that
		 * the system is actually idle. So subtract the iowait time
		 * from the cpu idle time.
		 */

		if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
			idle_time -= iowait_time;

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;

		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}
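	/*
	 * "Absolute load" scales raw load by the average running frequency,
	 * making it comparable across frequencies: 50% load measured at
	 * 2,000,000 kHz gives load_freq = 100,000,000, the same value as
	 * 100% load measured at 1,000,000 kHz. (Illustrative figures.)
	 */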
	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* if we are already at full speed then break out early */
		if (!dbs_tuners_ins.powersave_bias) {
			if (policy->cur == policy->max)
				return;

			__cpufreq_driver_target(policy, policy->max,
				CPUFREQ_RELATION_H);
		} else {
			int freq = powersave_bias_target(policy, policy->max,
					CPUFREQ_RELATION_H);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the frequency that is the lowest that
	 * can support the current CPU usage without triggering the up
	 * policy. To be safe, we focus 10 points under the threshold.
	 */
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	     policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
	}
}
static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	delay -= jiffies % delay;
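	/*
	 * Example: with delay = 100 jiffies and jiffies % 100 == 37, the
	 * work is queued 63 jiffies out, so every CPU's next sample lands
	 * on the same multiple-of-delay jiffy boundary.
	 */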
	mutex_lock(&dbs_info->timer_mutex);

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
	}
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}
static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	delay -= jiffies % delay;

	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
		delay);
}
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}
/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (androidlcom) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		mutex_lock(&dbs_mutex);

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
		if (rc) {
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		dbs_enable++;
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
						kstat_cpu(j).cpustat.nice;
			}
		}
		this_dbs_info->cpu = cpu;
		ondemand_powersave_bias_init_cpu(cpu);
		/*
		 * Start the timer schedule work when this governor
		 * is used for the first time.
		 */
		if (dbs_enable == 1) {
			unsigned int latency;

			rc = sysfs_create_group(cpufreq_global_kobject,
						&dbs_attr_group);
			if (rc) {
				mutex_unlock(&dbs_mutex);
				return rc;
			}

			/* policy latency is in ns. Convert it to us first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;
			/* Bring kernel and HW constraints together */
			min_sampling_rate = max(min_sampling_rate,
					MIN_LATENCY_MULTIPLIER * latency);
			dbs_tuners_ins.sampling_rate =
				max(min_sampling_rate,
				    latency * LATENCY_MULTIPLIER);
			dbs_tuners_ins.io_is_busy = should_io_be_busy();
		}
		mutex_unlock(&dbs_mutex);

		mutex_init(&this_dbs_info->timer_mutex);
		dbs_timer_init(this_dbs_info);
		break;

	case CPUFREQ_GOV_STOP:
		dbs_timer_exit(this_dbs_info);

		mutex_lock(&dbs_mutex);
		sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
		mutex_destroy(&this_dbs_info->timer_mutex);
		dbs_enable--;
		mutex_unlock(&dbs_mutex);
		if (!dbs_enable)
			sysfs_remove_group(cpufreq_global_kobject,
					   &dbs_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&this_dbs_info->timer_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&this_dbs_info->timer_mutex);
		break;
	}
	return 0;
}
static int __init cpufreq_gov_dbs_init(void)
{
	int err;
	cputime64_t wall;
	u64 idle_time;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, &wall);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_tuners_ins.down_differential =
					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		/*
		 * In no_hz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		/* For correct statistics, we need 10 ticks for each measure */
		min_sampling_rate =
			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
	}
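	/*
	 * Example: with HZ = 1000, jiffies_to_usecs(10) is 10,000 us, so
	 * min_sampling_rate = 2 * 10,000 = 20,000 us (20 ms) when fine
	 * idle accounting is unavailable.
	 */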
	kondemand_wq = create_workqueue("kondemand");
	if (!kondemand_wq) {
		printk(KERN_ERR "Creation of kondemand failed\n");
		return -EFAULT;
	}
	err = cpufreq_register_governor(&cpufreq_gov_ondemand);
	if (err)
		destroy_workqueue(kondemand_wq);

	return err;
}
static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
	destroy_workqueue(kondemand_wq);
}
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);