/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
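/*
 * Illustration (editor's note, not part of the original file): with the
 * helpers above, walking every policy that still has online CPUs looks
 * roughly like:
 *
 *	struct cpufreq_policy *policy;
 *
 *	for_each_active_policy(policy)
 *		pr_debug("policy%u: %u - %u kHz\n",
 *			 policy->cpu, policy->min, policy->max);
 *
 * Callers are assumed to hold whatever locking the surrounding code already
 * uses around cpufreq_policy_list.
 */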
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);
/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && !policy_is_inactive(policy) ?
		policy->freq_table : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
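/*
 * Usage sketch (editor's note, not part of the original file): governors
 * typically call this helper twice and compare the deltas to estimate load,
 * roughly:
 *
 *	u64 wall, idle, prev_idle;
 *
 *	prev_idle = get_cpu_idle_time(cpu, &wall, 0);
 *	// ... one sampling period later ...
 *	idle = get_cpu_idle_time(cpu, &wall, 0) - prev_idle;
 *
 * Both the returned idle time and the wall time written through @wall are
 * in microseconds.
 */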
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do following:
 * - validate & show freq table passed
 * - set policy's transition latency
 * - policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns policy for 'cpu', returns NULL if it doesn't exist.
 * It also increments the kobject reference count to mark it busy and so would
 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
 * If the corresponding call to cpufreq_cpu_put() isn't made, the policy won't
 * be freed, as that depends on the kobj count.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
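/*
 * Usage sketch (editor's note, not part of the original file): callers pair
 * cpufreq_cpu_get() with cpufreq_cpu_put() so the kobject reference taken
 * above is dropped again once they are done with the policy:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_debug("CPU%u currently at %u kHz\n", cpu, policy->cur);
 *		cpufreq_cpu_put(policy);
 *	}
 */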
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
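/*
 * Usage sketch (editor's note, not part of the original file): a driver
 * that performs its own notification (CPUFREQ_ASYNC_NOTIFICATION) would
 * bracket the actual frequency switch with the two helpers above, e.g.:
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = new_freq };
 *	int err;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	err = my_platform_set_rate(new_freq);	// hypothetical helper
 *	cpufreq_freq_transition_end(policy, &freqs, err);
 *
 * For ordinary drivers the core does the same thing on their behalf in
 * __target_index() below.
 */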
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
	return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
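/*
 * For illustration (editor's note, not part of the original file),
 * show_one(scaling_min_freq, min) above expands to roughly:
 *
 *	static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 *
 * i.e. one read-side sysfs handler per attribute, each printing a single
 * unsigned int field of the policy.
 */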
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	memcpy(&new_policy, policy, sizeof(*policy));			\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char	str_governor[16];
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);
	return ret ? ret : count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	down_read(&policy->rwsem);
	ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (cpu_online(policy->cpu)) {
		down_write(&policy->rwsem);
		ret = fattr->store(policy, buf, count);
		up_write(&policy->rwsem);
	}

	put_online_cpus();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);

	if (!policy)
		return 0;

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return 0;

	return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return;

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
}

/* Add/remove symlinks for all related CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	/* Some related CPUs might not be present (physically hotplugged) */
	for_each_cpu(j, policy->real_cpus) {
		ret = add_cpu_dev_symlink(policy, j);
		if (ret)
			break;
	}

	return ret;
}

static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;

	/* Some related CPUs might not be present (physically hotplugged) */
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, j);
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return cpufreq_add_dev_symlink(policy);
}
__weak struct cpufreq_governor *cpufreq_default_governor(void)
{
	return NULL;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(policy->last_governor);
	if (gov)
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	else
		gov = cpufreq_default_governor();

	if (!gov)
		return -ENODATA;

	new_policy.governor = gov;

	/* Use the default policy if there is no last_policy. */
	if (cpufreq_driver->setpolicy) {
		if (policy->last_policy)
			new_policy.policy = policy->last_policy;
		else
			cpufreq_parse_governor(gov->name, &new_policy.policy,
					       NULL);
	}
	/* set default policy */
	return cpufreq_set_policy(policy, &new_policy);
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);
	cpumask_set_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return 0;
}
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);
	struct cpufreq_policy *policy;

	if (WARN_ON(!dev))
		return NULL;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	kobject_init(&policy->kobj, &ktype_cpufreq);
	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
{
	struct kobject *kobj;
	struct completion *cmp;

	if (notify)
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);

	down_write(&policy->rwsem);
	cpufreq_remove_dev_symlink(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_policy_put_kobj(policy, notify);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy.  Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
	}

	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto out_free_policy;
	}

	down_write(&policy->rwsem);

	if (new_policy) {
		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
		/* Remember CPUs present at the policy creation time. */
		cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);

		/* Name and add the kobject */
		ret = kobject_add(&policy->kobj, cpufreq_global_kobject,
				  "policy%u",
				  cpumask_first(policy->related_cpus));
		if (ret) {
			pr_err("%s: failed to add policy->kobj: %d\n", __func__,
			       ret);
			goto out_exit_policy;
		}
	}

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		for_each_cpu(j, policy->related_cpus)
			per_cpu(cpufreq_cpu_data, j) = policy;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto out_exit_policy;
		}
	}

	/*
	 * Sometimes boot loaders set CPU frequency to a value outside of
	 * frequency table present with cpufreq core. In such cases CPU might be
	 * unstable if it has to run on that frequency for long duration of time
	 * and so its better to set it to a frequency which is specified in
	 * freq-table. This also makes cpufreq stats inconsistent as
	 * cpufreq-stats would fail to register because current frequency of CPU
	 * isn't found in freq-table.
	 *
	 * Because we don't want this change to effect boot process badly, we go
	 * for the next freq which is >= policy->cur ('cur' must be set by now,
	 * otherwise we will end up setting freq to lowest of the table as 'cur'
	 * is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_exit_policy;
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		/* cpufreq_policy_free() will notify based on this */
		new_policy = false;
		goto out_exit_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	pr_debug("initialization complete\n");

	return 0;

out_exit_policy:
	up_write(&policy->rwsem);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
out_free_policy:
	cpufreq_policy_free(policy, !new_policy);
	return ret;
}
/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned cpu = dev->id;
	int ret;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
	} else {
		/*
		 * A hotplug notifier will follow and we will handle it as CPU
		 * online then.  For now, just create the sysfs link, unless
		 * there is no policy or the link is already present.
		 */
		struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

		ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
			? add_cpu_dev_symlink(policy, cpu) : 0;
	}

	return ret;
}

static void cpufreq_offline_prepare(unsigned int cpu)
{
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return;
	}

	if (has_target()) {
		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret)
			pr_err("%s: Failed to stop governor\n", __func__);
	}

	down_write(&policy->rwsem);
	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
		else
			policy->last_policy = policy->policy;
	} else if (cpu == policy->cpu) {
		/* Nominate new CPU */
		policy->cpu = cpumask_any(policy->cpus);
	}
	up_write(&policy->rwsem);

	/* Start governor again for active policy */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
			int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
			if (!ret)
				ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}
	} else if (cpufreq_driver->stop_cpu) {
		cpufreq_driver->stop_cpu(policy);
	}
}
static void cpufreq_offline_finish(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return;
	}

	/* Only proceed for inactive policies */
	if (!policy_is_inactive(policy))
		return;

	/* If cpu is last user of policy, free policy */
	if (has_target()) {
		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		if (ret)
			pr_err("%s: Failed to exit governor\n", __func__);
	}

	/*
	 * Perform the ->exit() even during light-weight tear-down,
	 * since this is a core component, and is essential for the
	 * subsequent light-weight ->init() to succeed.
	 */
	if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}
}

/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	if (cpu_online(cpu)) {
		cpufreq_offline_prepare(cpu);
		cpufreq_offline_finish(cpu);
	}

	cpumask_clear_cpu(cpu, policy->real_cpus);
	remove_cpu_dev_symlink(policy, cpu);

	if (cpumask_empty(policy->real_cpus))
		cpufreq_policy_free(policy, true);
}

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}

/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 * in deep trouble.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		return cpufreq_driver->get(cpu);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(policy->cpu);

	/* Updating inactive policies is invalid, so avoid doing that. */
	if (unlikely(policy_is_inactive(policy)))
		return ret_freq;

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/*
		 * Verify that no discrepancy exists between the actual and
		 * the saved value.
		 */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(policy, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
/*
 * In case platform wants some specific frequency to be configured
 * during suspend..
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
			policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
			CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
				__func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);

/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors
 * as some platforms can't change frequency after this point in suspend cycle.
 * Because some of the devices (like i2c, regulators, etc.) used for changing
 * frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target())
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
			pr_err("%s: Failed to stop governor for policy: %p\n",
				__func__, policy);
		else if (cpufreq_driver->suspend
		    && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %p\n", __func__,
				policy);
	}

suspend:
	cpufreq_suspended = true;
}

/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	cpufreq_suspended = false;

	if (!has_target())
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);
		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
			pr_err("%s: Failed to start governor for policy: %p\n",
				__func__, policy);
	}

	/*
	 * Schedule a call to cpufreq_update_policy() for the first online CPU,
	 * as that one wouldn't be hotplugged out on suspend. It will verify
	 * that the current freq is in sync with what we believe it to be.
	 */
	policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
	if (WARN_ON(!policy))
		return;

	schedule_work(&policy->update);
}

/**
 *	cpufreq_get_current_driver - return current driver's name
 *
 *	Return the name string of the currently loaded cpufreq driver
 *	or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 *	cpufreq_get_driver_data - return current driver data
 *
 *	Return the private data of the currently loaded cpufreq
 *	driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 *	cpufreq_register_notifier - register a driver with cpufreq
 *	@nb: notifier function to register
 *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *	Add a driver to one of two lists: either a list of drivers that
 *	are notified about clock rate changes (once before and once after
 *	the transition), or a list of drivers that are notified about
 *	changes in cpufreq policy.
 *
 *	This function may sleep, and has the same return conditions as
 *	blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
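/*
 * Usage sketch (editor's note, not part of the original file): a client that
 * wants to see every frequency transition registers a notifier block whose
 * callback receives a struct cpufreq_freqs pointer:
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("CPU%u: %u -> %u kHz\n",
 *				 freqs->cpu, freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition_cb,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *
 * Names prefixed with "my_" are placeholders, not existing kernel symbols.
 */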
/**
 *	cpufreq_unregister_notifier - unregister a driver with cpufreq
 *	@nb: notifier block to be unregistered
 *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *	Remove a driver from the CPU frequency notifier list.
 *
 *	This function may sleep, and has the same return conditions as
 *	blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);


/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}

static int __target_index(struct cpufreq_policy *policy,
			  struct cpufreq_frequency_table *freq_table, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	int retval = -EINVAL;
	bool notify;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = freq_table[index].frequency;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	struct cpufreq_frequency_table *freq_table;
	int index, retval;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		return cpufreq_driver->target(policy, target_freq, relation);

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	freq_table = cpufreq_frequency_get_table(policy->cpu);
	if (unlikely(!freq_table)) {
		pr_err("%s: Unable to find freq_table\n", __func__);
		return -EINVAL;
	}

	retval = cpufreq_frequency_table_target(policy, freq_table, target_freq,
						relation, &index);
	if (unlikely(retval)) {
		pr_err("%s: Unable to find matching freq\n", __func__);
		return retval;
	}

	if (freq_table[index].frequency == policy->cur)
		return 0;

	return __target_index(policy, freq_table, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
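/*
 * Usage sketch (editor's note, not part of the original file): governors
 * request a frequency change through this wrapper, letting the core clamp the
 * request to policy->min/max and pick a table entry according to @relation:
 *
 *	cpufreq_driver_target(policy, target_khz, CPUFREQ_RELATION_L);
 *
 * CPUFREQ_RELATION_L selects the lowest listed frequency at or above the
 * request, CPUFREQ_RELATION_H the highest one at or below it.
 */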
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}

static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;
	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);

	mutex_lock(&cpufreq_governor_lock);
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}

int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);


/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/*
	 * This check works well when we store new min/max freq attributes,
	 * because new_policy is a copy of policy with one field updated.
	 */
	if (new_policy->min > new_policy->max)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	if (new_policy->governor == policy->governor)
		goto out;

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			/* This can happen due to race with other operations */
			pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
				 __func__, old_gov->name, ret);
			return ret;
		}

		up_write(&policy->rwsem);
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);

		if (ret) {
			pr_err("%s: Failed to Exit Governor: %s (%d)\n",
			       __func__, old_gov->name, ret);
			return ret;
		}
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
	if (!ret) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			goto out;

		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
			policy->governor = NULL;
		else
			__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}

	return ret;

 out:
	pr_debug("governor: change or update limits\n");
	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}

/**
 *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
 *	@cpu: CPU which shall be re-evaluated
 *
 *	Useful for policy notifiers which have different necessities
 *	at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy)
		return -ENODEV;

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (WARN_ON(!new_policy.cur)) {
			ret = -EIO;
			goto unlock;
		}

		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq\n");
			policy->cur = new_policy.cur;
		} else {
			if (policy->cur != new_policy.cur && has_target())
				cpufreq_out_of_sync(policy, new_policy.cur);
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);

static int cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		cpufreq_online(cpu);
		break;

	case CPU_DOWN_PREPARE:
		cpufreq_offline_prepare(cpu);
		break;

	case CPU_POST_DEAD:
		cpufreq_offline_finish(cpu);
		break;

	case CPU_DOWN_FAILED:
		cpufreq_online(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			ret = cpufreq_frequency_table_cpuinfo(policy,
							freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	return ret;
}

int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}

static bool cpufreq_boost_supported(void)
{
	return likely(cpufreq_driver) && cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);

/*********************************************************************
 *              REGISTER / UNREGISTER CPUFREQ DRIVER                 *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	get_online_cpus();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);
	ret = 0;
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
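/*
 * Usage sketch (editor's note, not part of the original file): a minimal
 * frequency-table based driver supplies ->init(), ->verify() and
 * ->target_index() callbacks and registers itself once at module load:
 *
 *	static struct cpufreq_driver my_cpufreq_driver = {
 *		.name		= "my-cpufreq",
 *		.init		= my_cpu_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= my_target_index,
 *		.get		= cpufreq_generic_get,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	ret = cpufreq_register_driver(&my_cpufreq_driver);
 *
 * The "my_*" callbacks are placeholders; the cpufreq_generic_* helpers are
 * the generic ones provided for table-based drivers.
 */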
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	get_online_cpus();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	put_online_cpus();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);