[CPUFREQ] allow ondemand and conservative cpufreq governors to be used as default
/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>

#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
						"cpufreq-core", msg)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static struct cpufreq_governor *cpufreq_cpu_governor[NR_CPUS];
#endif
static DEFINE_SPINLOCK(cpufreq_driver_lock);

/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - All holders of the lock should check to make sure that the CPU they
 *   are concerned with are online after they get the lock.
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 */
static DEFINE_PER_CPU(int, policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

#define lock_policy_rwsem(mode, cpu)					\
int lock_policy_rwsem_##mode						\
(int cpu)								\
{									\
	int policy_cpu = per_cpu(policy_cpu, cpu);			\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
	if (unlikely(!cpu_online(cpu))) {				\
		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
		return -1;						\
	}								\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);

lock_policy_rwsem(write, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);

void unlock_policy_rwsem_read(int cpu)
{
	int policy_cpu = per_cpu(policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);

void unlock_policy_rwsem_write(int cpu)
{
	int policy_cpu = per_cpu(policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
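
/*
 * Illustrative sketch (not part of the original file): the usage pattern
 * the locking rules above expect from readers of a policy.  The function
 * name example_read_policy() is hypothetical.
 */
#if 0
static int example_read_policy(unsigned int cpu)
{
	if (lock_policy_rwsem_read(cpu) < 0)
		return -EINVAL;		/* CPU went offline, lock already dropped */

	/* ... read-only access to the policy of 'cpu' goes here ... */

	unlock_policy_rwsem_read(cpu);
	return 0;
}
#endif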

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= NR_CPUS)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = cpufreq_cpu_data[cpu];

	if (!data)
		goto err_out_put_module;

	if (!kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);


void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
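
/*
 * Illustrative sketch (not part of the original file): the reference
 * counting pattern expected of cpufreq_cpu_get()/cpufreq_cpu_put() callers.
 * The function name example_last_known_freq() is hypothetical.
 */
#if 0
static unsigned int example_last_known_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int cur = 0;

	if (policy) {
		cur = policy->cur;	/* last known frequency in kHz */
		cpufreq_cpu_put(policy);
	}
	return cur;
}
#endif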


/*********************************************************************
 *                     UNIFIED DEBUG HELPERS                         *
 *********************************************************************/
#ifdef CONFIG_CPU_FREQ_DEBUG

/* what part(s) of the CPUfreq subsystem are debugged? */
static unsigned int debug;

/* is the debug output ratelimit'ed using printk_ratelimit? User can
 * set or modify this value.
 */
static unsigned int debug_ratelimit = 1;

/* is the printk_ratelimit'ing enabled? It's enabled after a successful
 * loading of a cpufreq driver, temporarily disabled when a new policy
 * is set, and disabled upon cpufreq driver removal
 */
static unsigned int disable_ratelimit = 1;
static DEFINE_SPINLOCK(disable_ratelimit_lock);

static void cpufreq_debug_enable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	if (disable_ratelimit)
		disable_ratelimit--;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

static void cpufreq_debug_disable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	disable_ratelimit++;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

void cpufreq_debug_printk(unsigned int type, const char *prefix,
			  const char *fmt, ...)
{
	char s[256];
	va_list args;
	unsigned int len;
	unsigned long flags;

	WARN_ON(!prefix);
	if (type & debug) {
		spin_lock_irqsave(&disable_ratelimit_lock, flags);
		if (!disable_ratelimit && debug_ratelimit
					&& !printk_ratelimit()) {
			spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&disable_ratelimit_lock, flags);

		len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);

		va_start(args, fmt);
		len += vsnprintf(&s[len], (256 - len), fmt, args);
		va_end(args);

		printk(s);

		WARN_ON(len < 5);
	}
}
EXPORT_SYMBOL(cpufreq_debug_printk);


module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
			" 2 to debug drivers, and 4 to debug governors.");

module_param(debug_ratelimit, uint, 0644);
MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
					" set to 0 to disable ratelimiting.");

#else /* !CONFIG_CPU_FREQ_DEBUG */

static inline void cpufreq_debug_enable_ratelimit(void) { return; }
static inline void cpufreq_debug_disable_ratelimit(void) { return; }

#endif /* CONFIG_CPU_FREQ_DEBUG */


/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		dprintk("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		dprintk("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
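
/*
 * Worked example (illustrative, not part of the original file): with a
 * reference loops_per_jiffy of 4,000,000 captured at 800,000 kHz, a switch
 * to 1,600,000 kHz gives
 *
 *	cpufreq_scale(4000000, 800000, 1600000) == 8000000
 *
 * i.e. loops_per_jiffy is scaled linearly with the CPU frequency.
 */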


/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;

	BUG_ON(irqs_disabled());

	freqs->flags = cpufreq_driver->flags;
	dprintk("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	policy = cpufreq_cpu_data[freqs->cpu];
	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				dprintk("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
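
/*
 * Illustrative sketch (not part of the original file): how a scaling driver
 * is expected to bracket a hardware frequency change with the two
 * notifications above.  example_set_freq() and example_write_hw_freq() are
 * hypothetical names.
 */
#if 0
static int example_set_freq(struct cpufreq_policy *policy, unsigned int new)
{
	struct cpufreq_freqs freqs;

	freqs.cpu = policy->cpu;
	freqs.old = policy->cur;
	freqs.new = new;

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	example_write_hw_freq(new);		/* hypothetical hardware poke */
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return 0;
}
#endif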



/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				  struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
								str_governor);

			if (name) {
				int ret;

				mutex_unlock(&cpufreq_governor_mutex);
				ret = request_module(name);
				mutex_lock(&cpufreq_governor_mutex);

				if (ret == 0)
					t = __find_governor(str_governor);
			}

			kfree(name);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}


/* drivers/base/cpu.c */
extern struct sysdev_class cpu_sysdev_class;


/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
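
/*
 * Illustrative expansion (not part of the original file): the last
 * show_one() invocation above generates roughly the following sysfs
 * show routine.
 */
#if 0
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", policy->cur);
}
#endif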

static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret = -EINVAL;					\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
				     char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}


/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy,
				     char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", policy->governor->name);
	return -EINVAL;
}


/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	unsigned int ret = -EINVAL;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu_mask(cpu, policy->cpus) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}


#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_ro0400(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0400, show_##_name, NULL)

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_ro0400(cpuinfo_cur_freq);
define_one_ro(cpuinfo_min_freq);
define_one_ro(cpuinfo_max_freq);
define_one_ro(scaling_available_governors);
define_one_ro(scaling_driver);
define_one_ro(scaling_cur_freq);
define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
define_one_rw(scaling_max_freq);
define_one_rw(scaling_governor);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		return -EINVAL;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);

	cpufreq_cpu_put(policy);
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		return -EINVAL;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);

	cpufreq_cpu_put(policy);
	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	dprintk("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};


/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 */
static int cpufreq_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int ret = 0;
	struct cpufreq_policy new_policy;
	struct cpufreq_policy *policy;
	struct freq_attr **drv_attr;
	struct sys_device *cpu_sys_dev;
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_SMP
	struct cpufreq_policy *managed_policy;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		cpufreq_debug_enable_ratelimit();
		return 0;
	}
#endif

	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy) {
		ret = -ENOMEM;
		goto nomem_out;
	}

	policy->cpu = cpu;
	policy->cpus = cpumask_of_cpu(cpu);

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(policy_cpu, cpu) = cpu;
	lock_policy_rwsem_write(cpu);

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* Set governor before ->init, so that driver could check it */
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		dprintk("initialization failed\n");
		unlock_policy_rwsem_write(cpu);
		goto err_out;
	}
	policy->user_policy.min = policy->cpuinfo.min_freq;
	policy->user_policy.max = policy->cpuinfo.max_freq;

#ifdef CONFIG_SMP

#ifdef CONFIG_HOTPLUG_CPU
	if (cpufreq_cpu_governor[cpu]) {
		policy->governor = cpufreq_cpu_governor[cpu];
		dprintk("Restoring governor %s for cpu %d\n",
			policy->governor->name, cpu);
	}
#endif

	for_each_cpu_mask(j, policy->cpus) {
		if (cpu == j)
			continue;

		/* check for existing affected CPUs. They may not be aware
		 * of it due to CPU Hotplug.
		 */
		managed_policy = cpufreq_cpu_get(j);
		if (unlikely(managed_policy)) {

			/* Set proper policy_cpu */
			unlock_policy_rwsem_write(cpu);
			per_cpu(policy_cpu, cpu) = managed_policy->cpu;

			if (lock_policy_rwsem_write(cpu) < 0)
				goto err_out_driver_exit;

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			managed_policy->cpus = policy->cpus;
			cpufreq_cpu_data[cpu] = managed_policy;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			dprintk("CPU already managed, adding link\n");
			ret = sysfs_create_link(&sys_dev->kobj,
						&managed_policy->kobj,
						"cpufreq");
			if (ret) {
				unlock_policy_rwsem_write(cpu);
				goto err_out_driver_exit;
			}

			cpufreq_debug_enable_ratelimit();
			ret = 0;
			unlock_policy_rwsem_write(cpu);
			goto err_out_driver_exit; /* call driver->exit() */
		}
	}
#endif
	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));

	/* prepare interface data */
	policy->kobj.parent = &sys_dev->kobj;
	policy->kobj.ktype = &ktype_cpufreq;
	strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);

	ret = kobject_register(&policy->kobj);
	if (ret) {
		unlock_policy_rwsem_write(cpu);
		goto err_out_driver_exit;
	}
	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_driver_exit;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_driver_exit;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_driver_exit;
	}

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu_mask(j, policy->cpus) {
		cpufreq_cpu_data[j] = policy;
		per_cpu(policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* symlink affected CPUs */
	for_each_cpu_mask(j, policy->cpus) {
		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		dprintk("CPU %u already managed, adding link\n", j);
		cpufreq_cpu_get(cpu);
		cpu_sys_dev = get_cpu_sysdev(j);
		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			unlock_policy_rwsem_write(cpu);
			goto err_out_unregister;
		}
	}

	policy->governor = NULL; /* to assure that the starting sequence is
				  * run in cpufreq_set_policy */

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	unlock_policy_rwsem_write(cpu);

	if (ret) {
		dprintk("setting policy failed\n");
		goto err_out_unregister;
	}

	module_put(cpufreq_driver->owner);
	dprintk("initialization complete\n");
	cpufreq_debug_enable_ratelimit();

	return 0;


err_out_unregister:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu_mask(j, policy->cpus)
		cpufreq_cpu_data[j] = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_unregister(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_out_driver_exit:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

err_out:
	kfree(policy);

nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}


/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long flags;
	struct cpufreq_policy *data;
#ifdef CONFIG_SMP
	struct sys_device *cpu_sys_dev;
	unsigned int j;
#endif

	cpufreq_debug_disable_ratelimit();
	dprintk("unregistering CPU %u\n", cpu);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = cpufreq_cpu_data[cpu];

	if (!data) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		return -EINVAL;
	}
	cpufreq_cpu_data[cpu] = NULL;


#ifdef CONFIG_SMP
	/* if this isn't the CPU which is the parent of the kobj, we
	 * only need to unlink, put and exit
	 */
	if (unlikely(cpu != data->cpu)) {
		dprintk("removing link\n");
		cpu_clear(cpu, data->cpus);
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
		cpufreq_cpu_put(data);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		return 0;
	}
#endif


	if (!kobject_get(&data->kobj)) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		return -EFAULT;
	}

#ifdef CONFIG_SMP

#ifdef CONFIG_HOTPLUG_CPU
	cpufreq_cpu_governor[cpu] = data->governor;
#endif

	/* if we have other CPUs still registered, we need to unlink them,
	 * or else wait_for_completion below will lock up. Clean the
	 * cpufreq_cpu_data[] while holding the lock, and remove the sysfs
	 * links afterwards.
	 */
	if (unlikely(cpus_weight(data->cpus) > 1)) {
		for_each_cpu_mask(j, data->cpus) {
			if (j == cpu)
				continue;
			cpufreq_cpu_data[j] = NULL;
		}
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (unlikely(cpus_weight(data->cpus) > 1)) {
		for_each_cpu_mask(j, data->cpus) {
			if (j == cpu)
				continue;
			dprintk("removing link for cpu %u\n", j);
#ifdef CONFIG_HOTPLUG_CPU
			cpufreq_cpu_governor[j] = data->governor;
#endif
			cpu_sys_dev = get_cpu_sysdev(j);
			sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
			cpufreq_cpu_put(data);
		}
	}
#else
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

	unlock_policy_rwsem_write(cpu);

	kobject_unregister(&data->kobj);

	kobject_put(&data->kobj);

	/* we need to make sure that the underlying kobj is actually
	 * not referenced anymore by anybody before we proceed with
	 * unloading.
	 */
	dprintk("waiting for dropping of refcount\n");
	wait_for_completion(&data->kobj_unregister);
	dprintk("wait complete\n");

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(data);

	kfree(data);

	cpufreq_debug_enable_ratelimit();
	return 0;
}


static int cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int retval;

	if (cpu_is_offline(cpu))
		return 0;

	if (unlikely(lock_policy_rwsem_write(cpu)))
		BUG();

	retval = __cpufreq_remove_dev(sys_dev);
	return retval;
}


static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	dprintk("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}

/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later. So either call
 * to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.cpu = cpu;
	freqs.old = old_freq;
	freqs.new = new_freq;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}


/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		if (unlikely(lock_policy_rwsem_read(cpu)))
			return ret_freq;

		ret_freq = policy->cur;

		unlock_policy_rwsem_read(cpu);
		cpufreq_cpu_put(policy);
	}

	return (ret_freq);
}
EXPORT_SYMBOL(cpufreq_quick_get);
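
/*
 * Illustrative sketch (not part of the original file): a caller that only
 * needs the last known frequency, e.g. for reporting, can avoid a driver
 * round trip by using cpufreq_quick_get().  example_report_freq() is a
 * hypothetical name.
 */
#if 0
static void example_report_freq(unsigned int cpu)
{
	unsigned int khz = cpufreq_quick_get(cpu);

	if (khz)
		printk(KERN_INFO "cpu %u last known frequency: %u kHz\n",
		       cpu, khz);
}
#endif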


static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_data[cpu];
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return (ret_freq);

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return (ret_freq);
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return (ret_freq);
}
EXPORT_SYMBOL(cpufreq_get);


/**
 * cpufreq_suspend - let the low level driver prepare for suspend
 */

static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
{
	int cpu = sysdev->id;
	int ret = 0;
	unsigned int cur_freq = 0;
	struct cpufreq_policy *cpu_policy;

	dprintk("suspending cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu)) {
		cpufreq_cpu_put(cpu_policy);
		return 0;
	}

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy, pmsg);
		if (ret) {
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
			cpufreq_cpu_put(cpu_policy);
			return ret;
		}
	}


	if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
		goto out;

	if (cpufreq_driver->get)
		cur_freq = cpufreq_driver->get(cpu_policy->cpu);

	if (!cur_freq || !cpu_policy->cur) {
		printk(KERN_ERR "cpufreq: suspend failed to assert current "
		       "frequency is what timing core thinks it is.\n");
		goto out;
	}

	if (unlikely(cur_freq != cpu_policy->cur)) {
		struct cpufreq_freqs freqs;

		if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
			dprintk("Warning: CPU frequency is %u, "
				"cpufreq assumed %u kHz.\n",
				cur_freq, cpu_policy->cur);

		freqs.cpu = cpu;
		freqs.old = cpu_policy->cur;
		freqs.new = cur_freq;

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				    CPUFREQ_SUSPENDCHANGE, &freqs);
		adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);

		cpu_policy->cur = cur_freq;
	}

out:
	cpufreq_cpu_put(cpu_policy);
	return 0;
}

/**
 * cpufreq_resume - restore proper CPU frequency handling after resume
 *
 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 * 2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
 * 3.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *     restored.
 */
static int cpufreq_resume(struct sys_device *sysdev)
{
	int cpu = sysdev->id;
	int ret = 0;
	struct cpufreq_policy *cpu_policy;

	dprintk("resuming cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu)) {
		cpufreq_cpu_put(cpu_policy);
		return 0;
	}

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			cpufreq_cpu_put(cpu_policy);
			return ret;
		}
	}

	if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		unsigned int cur_freq = 0;

		if (cpufreq_driver->get)
			cur_freq = cpufreq_driver->get(cpu_policy->cpu);

		if (!cur_freq || !cpu_policy->cur) {
			printk(KERN_ERR "cpufreq: resume failed to assert "
					"current frequency is what timing core "
					"thinks it is.\n");
			goto out;
		}

		if (unlikely(cur_freq != cpu_policy->cur)) {
			struct cpufreq_freqs freqs;

			if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
				dprintk("Warning: CPU frequency "
					"is %u, cpufreq assumed %u kHz.\n",
					cur_freq, cpu_policy->cur);

			freqs.cpu = cpu;
			freqs.old = cpu_policy->cur;
			freqs.new = cur_freq;

			srcu_notifier_call_chain(
					&cpufreq_transition_notifier_list,
					CPUFREQ_RESUMECHANGE, &freqs);
			adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);

			cpu_policy->cur = cur_freq;
		}
	}

out:
	schedule_work(&cpu_policy->update);
	cpufreq_cpu_put(cpu_policy);
	return ret;
}

static struct sysdev_driver cpufreq_sysdev_driver = {
	.add		= cpufreq_add_dev,
	.remove		= cpufreq_remove_dev,
	.suspend	= cpufreq_suspend,
	.resume		= cpufreq_resume,
};


/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
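
/*
 * Illustrative sketch (not part of the original file): how client code
 * subscribes to frequency transitions.  example_transition_handler,
 * example_nb and example_client_init are hypothetical names.
 */
#if 0
static int example_transition_handler(struct notifier_block *nb,
				      unsigned long state, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (state == CPUFREQ_POSTCHANGE)
		printk(KERN_DEBUG "cpu %u now runs at %u kHz\n",
		       freqs->cpu, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_transition_handler,
};

static int __init example_client_init(void)
{
	return cpufreq_register_notifier(&example_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
#endif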


/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);


/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/


int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;

	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
		target_freq, relation);
	if (cpu_online(policy->cpu) && cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		return -EINVAL;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
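
/*
 * Illustrative sketch (not part of the original file): a governor asking the
 * core to move a policy to its maximum frequency.  The locking and reference
 * counting shown in cpufreq_driver_target() is handled by the core; the
 * function name example_go_max() is hypothetical.
 */
#if 0
static void example_go_max(struct cpufreq_policy *policy)
{
	cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}
#endif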

int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
{
	int ret = 0;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
		ret = cpufreq_driver->getavg(policy->cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);

/*
 * when "event" is CPUFREQ_GOV_LIMITS
 */

static int __cpufreq_governor(struct cpufreq_policy *policy,
			      unsigned int event)
{
	int ret;
	struct cpufreq_governor *gov = CPUFREQ_PERFORMANCE_GOVERNOR;

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		printk(KERN_WARNING "%s governor failed, too long"
		       " transition latency of HW, fallback"
		       " to %s governor\n",
		       policy->governor->name,
		       gov->name);
		policy->governor = gov;
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	dprintk("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}


int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
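
/*
 * Illustrative sketch (not part of the original file): the shape of a
 * minimal governor module registering itself with the core.  The names
 * example_governor, cpufreq_gov_example and example_gov_init are
 * hypothetical.
 */
#if 0
static int example_governor(struct cpufreq_policy *policy, unsigned int event)
{
	/* pin the policy to its upper limit on start and on limit changes */
	if (event == CPUFREQ_GOV_START || event == CPUFREQ_GOV_LIMITS)
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
	return 0;
}

static struct cpufreq_governor cpufreq_gov_example = {
	.name		= "example",
	.governor	= example_governor,
	.owner		= THIS_MODULE,
};

static int __init example_gov_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_example);
}
module_init(example_gov_init);
#endif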


void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	if (!governor)
		return;

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);



/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy is written
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);


/*
 * data   : current policy.
 * policy : policy to be set.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	if (policy->min > data->min && policy->min > policy->max) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	dprintk("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		dprintk("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			dprintk("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				dprintk("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		dprintk("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}

/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret = 0;

	if (!data)
		return -ENODEV;

	if (unlikely(lock_policy_rwsem_write(cpu)))
		return -EINVAL;

	dprintk("updating policy for CPU %u\n", cpu);
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	   -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			dprintk("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

	cpufreq_cpu_put(data);
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);

static int cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	if (sys_dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			if (unlikely(lock_policy_rwsem_write(cpu)))
				BUG();

			__cpufreq_remove_dev(sys_dev);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
{
	.notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	dprintk("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = sysdev_driver_register(&cpu_sysdev_class, &cpufreq_sysdev_driver);

	if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < NR_CPUS; i++)
			if (cpufreq_cpu_data[i])
				ret = 0;

		/* if all ->init() calls failed, unregister */
		if (ret) {
			dprintk("no CPU initialized for driver %s\n",
							driver_data->name);
			sysdev_driver_unregister(&cpu_sysdev_class,
						 &cpufreq_sysdev_driver);

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpufreq_driver = NULL;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		}
	}

	if (!ret) {
		register_hotcpu_notifier(&cpufreq_cpu_notifier);
		dprintk("driver %s up and running\n", driver_data->name);
		cpufreq_debug_enable_ratelimit();
	}

	return (ret);
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
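
/*
 * Illustrative sketch (not part of the original file): the minimum a scaling
 * driver provides before calling cpufreq_register_driver() -- ->init and
 * ->verify plus either ->setpolicy or ->target, as checked above.  All
 * example_* names are hypothetical.
 */
#if 0
static struct cpufreq_driver example_cpufreq_driver = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.init	= example_cpu_init,	/* fill policy->cpuinfo and limits */
	.verify	= example_verify,	/* clamp a policy to hardware limits */
	.target	= example_target,	/* actually switch the frequency */
	.get	= example_get,		/* read the current frequency */
};

static int __init example_driver_init(void)
{
	return cpufreq_register_driver(&example_cpufreq_driver);
}
module_init(example_driver_init);
#endif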


/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	cpufreq_debug_disable_ratelimit();

	if (!cpufreq_driver || (driver != cpufreq_driver)) {
		cpufreq_debug_enable_ratelimit();
		return -EINVAL;
	}

	dprintk("unregistering driver %s\n", driver->name);

	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

static int __init cpufreq_core_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}
	return 0;
}

core_initcall(cpufreq_core_init);