cpufreq: cpufreq-cpu0: Call CPUFREQ_POSTCHANGE notifier for failure cases too
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
1da177e4
LT
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/notifier.h>
24#include <linux/cpufreq.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/device.h>
29#include <linux/slab.h>
30#include <linux/cpu.h>
31#include <linux/completion.h>
3fc54d37 32#include <linux/mutex.h>
e00e56df 33#include <linux/syscore_ops.h>
1da177e4 34
6f4f2723
TR
35#include <trace/events/power.h>
36
1da177e4 37/**
cd878479 38 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
39 * level driver of CPUFreq support, and its spinlock. This lock
40 * also protects the cpufreq_cpu_data array.
41 */
7d5e350f 42static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 43static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
44#ifdef CONFIG_HOTPLUG_CPU
45/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 47#endif
0d1857a1 48static DEFINE_RWLOCK(cpufreq_driver_lock);
1da177e4 49
5a01f2e8
VP
50/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
5a01f2e8
VP
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 66 */
f1625066 67static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
68static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69
/*
 * Generate lock_policy_rwsem_read()/lock_policy_rwsem_write(): look up the
 * policy-owning CPU for @cpu and take that policy's rwsem in the requested
 * mode.  BUG_ON fires if the CPU has no policy owner recorded (-1), i.e. the
 * caller raced with policy teardown.  Always returns 0 (kept for callers
 * that check "< 0").
 */
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);
5a01f2e8 82
fa1d8af4
VK
/*
 * Generate unlock_policy_rwsem_read()/unlock_policy_rwsem_write(): release
 * the per-policy rwsem taken by the matching lock_policy_rwsem_##mode().
 * The policy_cpu lookup must still resolve — BUG_ON catches an unlock after
 * the policy mapping was torn down.
 */
#define unlock_policy_rwsem(mode, cpu)					\
static void unlock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
5a01f2e8 93
1da177e4 94/* internal prototypes */
29464f28
DJ
95static int __cpufreq_governor(struct cpufreq_policy *policy,
96 unsigned int event);
5a01f2e8 97static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 98static void handle_update(struct work_struct *work);
1da177e4
LT
99
100/**
32ee8c3e
DJ
101 * Two notifier lists: the "policy" list is involved in the
102 * validation process for a new CPU frequency policy; the
1da177e4
LT
103 * "transition" list for kernel code that needs to handle
104 * changes to devices when the CPU clock speed changes.
105 * The mutex locks both lists.
106 */
e041c683 107static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 108static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 109
/* Set once the SRCU notifier head below has been initialized; checked by
 * (un)registration paths elsewhere in this file before using the head. */
static bool init_cpufreq_transition_notifier_list_called;

/*
 * Initialize the SRCU head for transition notifiers.  Runs as a
 * pure_initcall so it happens before any driver/notifier registration.
 */
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 118
/* Non-zero once cpufreq has been administratively disabled; read-mostly. */
static int off __read_mostly;

/* Return non-zero if cpufreq is disabled (see disable_cpufreq()). */
static int cpufreq_disabled(void)
{
	return off;
}

/* Disable cpufreq for the rest of this boot; there is no re-enable path. */
void disable_cpufreq(void)
{
	off = 1;
}
1da177e4 128static LIST_HEAD(cpufreq_governor_list);
29464f28 129static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 130
4d5dcc42
VK
131bool have_governor_per_policy(void)
132{
133 return cpufreq_driver->have_governor_per_policy;
134}
135
/*
 * __cpufreq_cpu_get - look up the policy for @cpu and pin it.
 *
 * On success the caller holds a module reference on the driver and,
 * unless @sysfs is true, a kobject reference on the policy; both must be
 * dropped via __cpufreq_cpu_put() with the same @sysfs value.  The sysfs
 * path skips the kobject ref because sysfs itself keeps the kobject alive
 * while an attribute is being accessed.
 *
 * Returns the policy, or NULL if @cpu is invalid, no driver is registered,
 * the driver module is going away, or the CPU has no policy yet.
 */
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	/* pin the policy kobject, except on the sysfs path (see above) */
	if (!sysfs && !kobject_get(&data->kobj))
		goto err_out_put_module;

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

	/* unwind in reverse order of acquisition */
err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
a9144436
SB
173
174struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
175{
d5aaffa9
DB
176 if (cpufreq_disabled())
177 return NULL;
178
a9144436
SB
179 return __cpufreq_cpu_get(cpu, false);
180}
1da177e4
LT
181EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
182
a9144436
SB
183static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
184{
185 return __cpufreq_cpu_get(cpu, true);
186}
187
188static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
189{
190 if (!sysfs)
191 kobject_put(&data->kobj);
192 module_put(cpufreq_driver->owner);
193}
7d5e350f 194
1da177e4
LT
195void cpufreq_cpu_put(struct cpufreq_policy *data)
196{
d5aaffa9
DB
197 if (cpufreq_disabled())
198 return;
199
a9144436 200 __cpufreq_cpu_put(data, false);
1da177e4
LT
201}
202EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
203
a9144436
SB
204static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
205{
206 __cpufreq_cpu_put(data, true);
207}
1da177e4 208
1da177e4
LT
209/*********************************************************************
210 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
211 *********************************************************************/
212
213/**
214 * adjust_jiffies - adjust the system "loops_per_jiffy"
215 *
216 * This function alters the system "loops_per_jiffy" for the clock
217 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 218 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
219 * per-CPU loops_per_jiffy value wherever possible.
220 */
#ifndef CONFIG_SMP
/* Reference loops_per_jiffy and the frequency it was sampled at; captured
 * lazily on the first transition notification. */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

/*
 * Rescale the global loops_per_jiffy after a frequency change (UP only;
 * on SMP each CPU may scale differently, see the comment above).  No-op
 * when the driver says delay loops are frequency-independent
 * (CPUFREQ_CONST_LOOPS).
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	/* first call: remember the baseline to scale against */
	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	/* rescale on a real change, and unconditionally on suspend/resume */
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: per-CPU loops_per_jiffy is handled by the architecture instead. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
250
251
252/**
e4472cb3
DJ
253 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
254 * on frequency transition.
1da177e4 255 *
e4472cb3
DJ
256 * This function calls the transition notifiers and the "adjust_jiffies"
257 * function. It is called twice on all CPU frequency changes that have
32ee8c3e 258 * external effects.
1da177e4
LT
259 */
/*
 * cpufreq_notify_transition - call transition notifiers and adjust_jiffies.
 *
 * Called twice per frequency change: once with CPUFREQ_PRECHANGE and once
 * with CPUFREQ_POSTCHANGE.  Must run in process context (BUG_ON otherwise,
 * since notifiers may sleep).  On POSTCHANGE the cached policy->cur is
 * synchronized with the new frequency.
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	/* snapshot the policy pointer under the driver lock */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				/* trust the core's value over the driver's */
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* keep the core's cached frequency in sync */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
312
313
314
315/*********************************************************************
316 * SYSFS INTERFACE *
317 *********************************************************************/
318
3bcb09a3
JF
319static struct cpufreq_governor *__find_governor(const char *str_governor)
320{
321 struct cpufreq_governor *t;
322
323 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 324 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
325 return t;
326
327 return NULL;
328}
329
1da177e4
LT
330/**
331 * cpufreq_parse_governor - parse a governor string
332 */
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers only "performance"/"powersave" are valid and set
 * *policy; for target drivers the string names a governor, looked up in the
 * registered list and, if absent, loaded via request_module("cpufreq_%s")
 * and looked up again.  Returns 0 on success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_policy **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/* drop the mutex: module init will register the
			 * governor, which needs to take it */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4
LT
378
379
1da177e4 380/**
e08f5f5b
GS
381 * cpufreq_per_cpu_attr_read() / show_##file_name() -
382 * print out cpufreq information
1da177e4
LT
383 *
384 * Write out information from cpufreq_driver->policy[cpu]; object must be
385 * "unsigned int".
386 */
387
32ee8c3e
DJ
/*
 * show_one - generate a sysfs show() that prints one unsigned int policy
 * member followed by a newline.  Instantiated below for the read-only and
 * read-write frequency attributes.
 */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
401
e08f5f5b
GS
402static int __cpufreq_set_policy(struct cpufreq_policy *data,
403 struct cpufreq_policy *policy);
7970e08b 404
1da177e4
LT
405/**
406 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
407 */
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 *
 * Generates a store() that parses one unsigned int, applies it through
 * __cpufreq_set_policy() on a copy of the current policy, and records the
 * (possibly clamped) result in user_policy.  Returns the error from
 * set_policy, or @count on success.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret;						\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
1da177e4
LT
431
432/**
433 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
434 */
905d77cd
DJ
435static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
436 char *buf)
1da177e4 437{
5a01f2e8 438 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
439 if (!cur_freq)
440 return sprintf(buf, "<unknown>");
441 return sprintf(buf, "%u\n", cur_freq);
442}
443
444
445/**
446 * show_scaling_governor - show the current policy for the specified CPU
447 */
905d77cd 448static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 449{
29464f28 450 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
451 return sprintf(buf, "powersave\n");
452 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
453 return sprintf(buf, "performance\n");
454 else if (policy->governor)
4b972f0b 455 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 456 policy->governor->name);
1da177e4
LT
457 return -EINVAL;
458}
459
460
461/**
462 * store_scaling_governor - store policy for the specified CPU
463 */
905d77cd
DJ
/**
 * store_scaling_governor - store policy for the specified CPU
 *
 * Parses a governor (or policy) name and applies it via
 * __cpufreq_set_policy(); on return user_policy mirrors whatever actually
 * took effect.  Returns @count on success or a negative errno.
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	/* %15s: bounded read, leaves room for the NUL terminator */
	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
495
496/**
497 * show_scaling_driver - show the cpufreq driver currently loaded
498 */
905d77cd 499static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 500{
4b972f0b 501 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
1da177e4
LT
502}
503
504/**
505 * show_scaling_available_governors - show the available CPUfreq governors
506 */
905d77cd
DJ
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 *
 * For setpolicy-only drivers the fixed pair "performance powersave" is
 * reported; otherwise all registered governors, space-separated, stopping
 * early if the next name could overflow the PAGE_SIZE sysfs buffer.
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	/* NOTE(review): list walked without cpufreq_governor_mutex here —
	 * presumably safe in practice; confirm against other readers. */
	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 528
835481d9 529static ssize_t show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
530{
531 ssize_t i = 0;
532 unsigned int cpu;
533
835481d9 534 for_each_cpu(cpu, mask) {
1da177e4
LT
535 if (i)
536 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
537 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
538 if (i >= (PAGE_SIZE - 5))
29464f28 539 break;
1da177e4
LT
540 }
541 i += sprintf(&buf[i], "\n");
542 return i;
543}
544
e8628dd0
DW
545/**
546 * show_related_cpus - show the CPUs affected by each transition even if
547 * hw coordination is in use
548 */
549static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
550{
e8628dd0
DW
551 return show_cpus(policy->related_cpus, buf);
552}
553
554/**
555 * show_affected_cpus - show the CPUs affected by each transition
556 */
557static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
558{
559 return show_cpus(policy->cpus, buf);
560}
561
9e76988e 562static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 563 const char *buf, size_t count)
9e76988e
VP
564{
565 unsigned int freq = 0;
566 unsigned int ret;
567
879000f9 568 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
569 return -EINVAL;
570
571 ret = sscanf(buf, "%u", &freq);
572 if (ret != 1)
573 return -EINVAL;
574
575 policy->governor->store_setspeed(policy, freq);
576
577 return count;
578}
579
580static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
581{
879000f9 582 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
583 return sprintf(buf, "<unsupported>\n");
584
585 return policy->governor->show_setspeed(policy, buf);
586}
1da177e4 587
e2f74f35 588/**
8bf1ac72 589 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
590 */
591static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
592{
593 unsigned int limit;
594 int ret;
595 if (cpufreq_driver->bios_limit) {
596 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
597 if (!ret)
598 return sprintf(buf, "%u\n", limit);
599 }
600 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
601}
602
6dad2a29
BP
603cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
604cpufreq_freq_attr_ro(cpuinfo_min_freq);
605cpufreq_freq_attr_ro(cpuinfo_max_freq);
606cpufreq_freq_attr_ro(cpuinfo_transition_latency);
607cpufreq_freq_attr_ro(scaling_available_governors);
608cpufreq_freq_attr_ro(scaling_driver);
609cpufreq_freq_attr_ro(scaling_cur_freq);
610cpufreq_freq_attr_ro(bios_limit);
611cpufreq_freq_attr_ro(related_cpus);
612cpufreq_freq_attr_ro(affected_cpus);
613cpufreq_freq_attr_rw(scaling_min_freq);
614cpufreq_freq_attr_rw(scaling_max_freq);
615cpufreq_freq_attr_rw(scaling_governor);
616cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 617
905d77cd 618static struct attribute *default_attrs[] = {
1da177e4
LT
619 &cpuinfo_min_freq.attr,
620 &cpuinfo_max_freq.attr,
ed129784 621 &cpuinfo_transition_latency.attr,
1da177e4
LT
622 &scaling_min_freq.attr,
623 &scaling_max_freq.attr,
624 &affected_cpus.attr,
e8628dd0 625 &related_cpus.attr,
1da177e4
LT
626 &scaling_governor.attr,
627 &scaling_driver.attr,
628 &scaling_available_governors.attr,
9e76988e 629 &scaling_setspeed.attr,
1da177e4
LT
630 NULL
631};
632
8aa84ad8
TR
633struct kobject *cpufreq_global_kobject;
634EXPORT_SYMBOL(cpufreq_global_kobject);
635
29464f28
DJ
636#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
637#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 638
/*
 * show - common sysfs read entry point for all policy attributes.
 *
 * Pins the policy (sysfs variant: module ref only), takes the per-policy
 * rwsem for reading, and dispatches to the attribute's ->show.  Unwinds in
 * reverse order via the labelled exits.
 */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	/* re-acquire through the refcounted lookup before touching policy */
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
662
905d77cd
DJ
/*
 * store - common sysfs write entry point for all policy attributes.
 *
 * Mirror of show() but takes the per-policy rwsem in write mode before
 * dispatching to the attribute's ->store.
 */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	/* re-acquire through the refcounted lookup before touching policy */
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
687
905d77cd 688static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 689{
905d77cd 690 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 691 pr_debug("last reference is dropped\n");
1da177e4
LT
692 complete(&policy->kobj_unregister);
693}
694
52cf25d0 695static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
696 .show = show,
697 .store = store,
698};
699
700static struct kobj_type ktype_cpufreq = {
701 .sysfs_ops = &sysfs_ops,
702 .default_attrs = default_attrs,
703 .release = cpufreq_sysfs_release,
704};
705
19d6f7ec 706/* symlink affected CPUs */
cf3289d0
AC
/*
 * cpufreq_add_dev_symlink - create "cpufreq" symlinks for every sibling CPU
 * in @policy (all CPUs sharing the policy except @cpu itself), pointing at
 * the policy's kobject.  Returns 0 or the sysfs_create_link() error.
 */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		if (j == cpu)
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		/* NOTE(review): return value unchecked, and a reference is
		 * taken per sibling but only released on link failure —
		 * presumably the extra refs are intentional pinning; verify. */
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
732
cf3289d0
AC
/*
 * cpufreq_add_dev_interface - create the sysfs interface for a new policy
 * and apply the initial policy.
 *
 * Registers the policy kobject under the cpu device, creates driver and
 * optional (get/target/bios_limit) attribute files, publishes the policy in
 * the per-cpu tables, links sibling CPUs, then runs __cpufreq_set_policy()
 * to start the default governor.  On attribute/link failure the kobject is
 * put and its release waited for; a set_policy failure additionally calls
 * the driver's ->exit.
 */
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	/* publish the policy for every CPU it covers */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
805
fcf80582
VK
#ifdef CONFIG_HOTPLUG_CPU
/*
 * cpufreq_add_policy_cpu - attach @cpu to the existing policy of @sibling.
 *
 * Stops the governor, adds @cpu to the policy's masks/tables under the
 * policy rwsem and driver lock, restarts the governor, and creates the
 * sysfs link.  Holds the reference from cpufreq_cpu_get(sibling) on
 * success; drops it only on link failure.
 */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	/* NOTE(review): WARN_ON only — a NULL policy is dereferenced just
	 * below; presumably the caller's driver-lock check makes NULL
	 * impossible, but confirm. */
	WARN_ON(!policy);

	__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	__cpufreq_governor(policy, CPUFREQ_GOV_START);
	__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif
1da177e4
LT
842
843/**
844 * cpufreq_add_dev - add a CPU device
845 *
32ee8c3e 846 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
847 *
848 * The Oracle says: try running cpufreq registration/unregistration concurrently
849 * with with cpu hotplugging and all hell will break loose. Tried to clean this
850 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 851 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;
	int sibling;
#endif

	/* offline CPUs get their interface when they come online */
	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
			/* join the sibling's existing policy instead of
			 * creating a new one */
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			return cpufreq_add_policy_cpu(cpu, sibling, dev);
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif

	/* pin the driver module for the duration of initialization */
	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	/* restore the governor this CPU had before it was hot-unplugged */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	ret = cpufreq_add_dev_interface(cpu, policy, dev);
	if (ret)
		goto err_out_unregister;

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	pr_debug("initialization complete\n");

	return 0;

	/* error unwinding, in reverse order of the setup above */
err_out_unregister:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_set_policy_cpu:
	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	return ret;
}
980
/*
 * update_policy_cpu - hand ownership of a shared policy to a new CPU
 * @policy: policy whose managing CPU is being replaced
 * @cpu: CPU that takes over as policy->cpu
 *
 * Records the outgoing owner in policy->last_cpu, repoints the per-cpu
 * policy_cpu entries of every CPU covered by the policy at the new owner,
 * and notifies policy listeners of the ownership change.
 */
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int j;

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	/* all siblings must resolve their policy lock via the new owner */
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	/* lets e.g. stats/freq-table code migrate their per-policy state */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
1da177e4
LT
997
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, ret, cpus;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
	struct device *cpu_dev;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	/* detach this CPU's policy pointer under the driver lock */
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	data = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!data) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
	/* remember the governor so it can be restored if the CPU comes back */
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			data->governor->name, CPUFREQ_NAME_LEN);
#endif

	WARN_ON(lock_policy_rwsem_write(cpu));
	cpus = cpumask_weight(data->cpus);
	cpumask_clear_cpu(cpu, data->cpus);
	unlock_policy_rwsem_write(cpu);

	if (cpu != data->cpu) {
		/* non-owner CPU: only its sysfs symlink must go */
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* first sibling now owns the new sysfs dir */
		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
		if (ret) {
			pr_err("%s: Failed to move kobj: %d", __func__, ret);

			/* roll back: re-add this CPU to the policy ... */
			WARN_ON(lock_policy_rwsem_write(cpu));
			cpumask_set_cpu(cpu, data->cpus);

			write_lock_irqsave(&cpufreq_driver_lock, flags);
			per_cpu(cpufreq_cpu_data, cpu) = data;
			write_unlock_irqrestore(&cpufreq_driver_lock, flags);

			unlock_policy_rwsem_write(cpu);

			/* ... and restore the sibling's symlink */
			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
					"cpufreq");
			return -EINVAL;
		}

		WARN_ON(lock_policy_rwsem_write(cpu));
		update_policy_cpu(data, cpu_dev->id);
		unlock_policy_rwsem_write(cpu);
		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				__func__, cpu_dev->id, cpu);
	}

	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
	cpufreq_cpu_put(data);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);

		lock_policy_rwsem_read(cpu);
		kobj = &data->kobj;
		cmp = &data->kobj_unregister;
		unlock_policy_rwsem_read(cpu);
		kobject_put(kobj);

		/* we need to make sure that the underlying kobj is actually
		 * not referenced anymore by anybody before we proceed with
		 * unloading.
		 */
		pr_debug("waiting for dropping of refcount\n");
		wait_for_completion(cmp);
		pr_debug("wait complete\n");

		if (cpufreq_driver->exit)
			cpufreq_driver->exit(data);

		free_cpumask_var(data->related_cpus);
		free_cpumask_var(data->cpus);
		kfree(data);
	} else if (cpufreq_driver->target) {
		/* policy survives with remaining CPUs: restart governor */
		__cpufreq_governor(data, CPUFREQ_GOV_START);
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	return 0;
}
1108
1109
8a25a2fd 1110static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1111{
8a25a2fd 1112 unsigned int cpu = dev->id;
5a01f2e8 1113 int retval;
ec28297a
VP
1114
1115 if (cpu_is_offline(cpu))
1116 return 0;
1117
8a25a2fd 1118 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1119 return retval;
1120}
1121
1122
65f27f38 1123static void handle_update(struct work_struct *work)
1da177e4 1124{
65f27f38
DH
1125 struct cpufreq_policy *policy =
1126 container_of(work, struct cpufreq_policy, update);
1127 unsigned int cpu = policy->cpu;
2d06d8c4 1128 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1129 cpufreq_update_policy(cpu);
1130}
1131
1132/**
1133 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1134 * @cpu: cpu number
1135 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1136 * @new_freq: CPU frequency the CPU actually runs at
1137 *
29464f28
DJ
1138 * We adjust to current frequency first, and need to clean up later.
1139 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1140 */
e08f5f5b
GS
1141static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1142 unsigned int new_freq)
1da177e4
LT
1143{
1144 struct cpufreq_freqs freqs;
1145
2d06d8c4 1146 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1147 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1148
1149 freqs.cpu = cpu;
1150 freqs.old = old_freq;
1151 freqs.new = new_freq;
1152 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1153 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1154}
1155
1156
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 * Returns 0 if no policy exists for @cpu.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;

	/* ->setpolicy drivers are queried directly — presumably they don't
	 * keep policy->cur up to date; TODO confirm against such a driver */
	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		return cpufreq_driver->get(cpu);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
1181
3d737108
JB
1182/**
1183 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1184 * @cpu: CPU number
1185 *
1186 * Just return the max possible frequency for a given CPU.
1187 */
1188unsigned int cpufreq_quick_get_max(unsigned int cpu)
1189{
1190 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1191 unsigned int ret_freq = 0;
1192
1193 if (policy) {
1194 ret_freq = policy->max;
1195 cpufreq_cpu_put(policy);
1196 }
1197
1198 return ret_freq;
1199}
1200EXPORT_SYMBOL(cpufreq_quick_get_max);
1201
95235ca2 1202
/*
 * __cpufreq_get - read the current frequency from the driver for @cpu
 *
 * Caller must hold the policy rwsem (see cpufreq_get()).  If the value
 * reported by the hardware disagrees with policy->cur, resynchronize via
 * cpufreq_out_of_sync() and schedule a deferred policy update.
 * Returns 0 when the driver cannot report a frequency.
 */
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	/* CONST_LOOPS drivers never drift, so skip the consistency check */
	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
1da177e4 1225
5a01f2e8
VP
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 * Returns 0 if no policy exists or the policy lock cannot be taken.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	/* read lock serializes against concurrent policy updates/removal */
	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
1253
8a25a2fd
KS
/* Hook into the driver-core CPU subsystem: cpufreq_add_dev() /
 * cpufreq_remove_dev() are called for each CPU device added/removed. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1260
1da177e4 1261
42d4dc3f 1262/**
e00e56df
RW
1263 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1264 *
1265 * This function is only executed for the boot processor. The other CPUs
1266 * have been put offline by means of CPU hotplug.
42d4dc3f 1267 */
e00e56df 1268static int cpufreq_bp_suspend(void)
42d4dc3f 1269{
e08f5f5b 1270 int ret = 0;
4bc5d341 1271
e00e56df 1272 int cpu = smp_processor_id();
42d4dc3f
BH
1273 struct cpufreq_policy *cpu_policy;
1274
2d06d8c4 1275 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1276
e00e56df 1277 /* If there's no policy for the boot CPU, we have nothing to do. */
42d4dc3f
BH
1278 cpu_policy = cpufreq_cpu_get(cpu);
1279 if (!cpu_policy)
e00e56df 1280 return 0;
42d4dc3f
BH
1281
1282 if (cpufreq_driver->suspend) {
7ca64e2d 1283 ret = cpufreq_driver->suspend(cpu_policy);
ce6c3997 1284 if (ret)
42d4dc3f
BH
1285 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1286 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1287 }
1288
42d4dc3f 1289 cpufreq_cpu_put(cpu_policy);
c9060494 1290 return ret;
42d4dc3f
BH
1291}
1292
1da177e4 1293/**
e00e56df 1294 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1295 *
1296 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1297 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1298 * restored. It will verify that the current freq is in sync with
1299 * what we believe it to be. This is a bit later than when it
1300 * should be, but nonethteless it's better than calling
1301 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1302 *
1303 * This function is only executed for the boot CPU. The other CPUs have not
1304 * been turned on yet.
1da177e4 1305 */
e00e56df 1306static void cpufreq_bp_resume(void)
1da177e4 1307{
e08f5f5b 1308 int ret = 0;
4bc5d341 1309
e00e56df 1310 int cpu = smp_processor_id();
1da177e4
LT
1311 struct cpufreq_policy *cpu_policy;
1312
2d06d8c4 1313 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1314
e00e56df 1315 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1316 cpu_policy = cpufreq_cpu_get(cpu);
1317 if (!cpu_policy)
e00e56df 1318 return;
1da177e4
LT
1319
1320 if (cpufreq_driver->resume) {
1321 ret = cpufreq_driver->resume(cpu_policy);
1322 if (ret) {
1323 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1324 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1325 goto fail;
1da177e4
LT
1326 }
1327 }
1328
1da177e4 1329 schedule_work(&cpu_policy->update);
ce6c3997 1330
c9060494 1331fail:
1da177e4 1332 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1333}
1334
e00e56df
RW
/* syscore hooks run with only the boot CPU online, interrupts disabled */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1339
9d95046e
BP
1340/**
1341 * cpufreq_get_current_driver - return current driver's name
1342 *
1343 * Return the name string of the currently loaded cpufreq driver
1344 * or NULL, if none.
1345 */
1346const char *cpufreq_get_current_driver(void)
1347{
1348 if (cpufreq_driver)
1349 return cpufreq_driver->name;
1350
1351 return NULL;
1352}
1353EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1354
1355/*********************************************************************
1356 * NOTIFIER LISTS INTERFACE *
1357 *********************************************************************/
1358
/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	/* the SRCU chain must have been initialized before first use */
	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		/* transition chain is SRCU-based, policy chain is blocking */
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
1397
1398
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		/* mirror of cpufreq_register_notifier(): SRCU vs blocking */
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
1432
1433
1434/*********************************************************************
1435 * GOVERNORS *
1436 *********************************************************************/
1437
1438
/*
 * __cpufreq_driver_target - ask the driver to switch to @target_freq
 *
 * Clamps @target_freq to [policy->min, policy->max] before calling the
 * driver.  Caller must hold the policy rwsem in write mode (see
 * cpufreq_driver_target() for the locked wrapper).
 * Returns 0 if already at the target, -ENODEV when cpufreq is disabled,
 * -EINVAL when the driver has no ->target, or the driver's result.
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
			policy->cpu, target_freq, relation, old_target_freq);

	/* no-op if we're already there */
	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1467
1da177e4
LT
/*
 * cpufreq_driver_target - locked wrapper around __cpufreq_driver_target()
 *
 * Takes a reference on the policy and the per-CPU write lock before
 * delegating; returns -EINVAL when either step fails.
 */
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	/* re-acquire via cpu to hold a refcount for the duration */
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1491
/*
 * __cpufreq_driver_getavg - query the driver's average-frequency estimate
 *
 * Returns 0 when cpufreq is disabled or the driver provides no ->getavg,
 * -EINVAL when the policy vanished, otherwise the driver's result.
 */
int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	if (cpufreq_disabled())
		return ret;

	if (!cpufreq_driver->getavg)
		return 0;

	/* hold a policy reference across the driver call */
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	ret = cpufreq_driver->getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1512
/*
 * __cpufreq_governor - deliver @event to the policy's governor
 *
 * Common entry point for governor lifecycle events (INIT/START/STOP/
 * LIMITS/EXIT); most external callers use it with CPUFREQ_GOV_LIMITS.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* governor can't cope with this HW's transition latency: fall back */
	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* track how many policies currently use this governor */
	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	}

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
1570
1571
1da177e4
LT
1572int cpufreq_register_governor(struct cpufreq_governor *governor)
1573{
3bcb09a3 1574 int err;
1da177e4
LT
1575
1576 if (!governor)
1577 return -EINVAL;
1578
a7b422cd
KRW
1579 if (cpufreq_disabled())
1580 return -ENODEV;
1581
3fc54d37 1582 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1583
b394058f 1584 governor->initialized = 0;
3bcb09a3
JF
1585 err = -EBUSY;
1586 if (__find_governor(governor->name) == NULL) {
1587 err = 0;
1588 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1589 }
1da177e4 1590
32ee8c3e 1591 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1592 return err;
1da177e4
LT
1593}
1594EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1595
1596
/*
 * cpufreq_unregister_governor - remove @governor from the global list
 *
 * Also clears any saved "restore on hotplug" governor name that still
 * points at this governor for currently-offline CPUs.
 */
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		/* drop stale per-cpu restore entry naming this governor */
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1624
1625
1626
1627/*********************************************************************
1628 * POLICY INTERFACE *
1629 *********************************************************************/
1630
1631/**
1632 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1633 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1634 * is written
1da177e4
LT
1635 *
1636 * Reads the current cpufreq policy.
1637 */
1638int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1639{
1640 struct cpufreq_policy *cpu_policy;
1641 if (!policy)
1642 return -EINVAL;
1643
1644 cpu_policy = cpufreq_cpu_get(cpu);
1645 if (!cpu_policy)
1646 return -EINVAL;
1647
1da177e4 1648 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1649
1650 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1651 return 0;
1652}
1653EXPORT_SYMBOL(cpufreq_get_policy);
1654
1655
/*
 * __cpufreq_set_policy - apply a new policy to an existing one
 * data   : current policy.
 * policy : policy to be set.
 *
 * Validates the new limits against the driver, lets policy notifiers
 * adjust them, then either hands the policy to a ->setpolicy driver or
 * performs a governor switch (with rollback to the old governor on
 * failure).  Caller is expected to hold the policy write lock.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	/* reject ranges that don't overlap the currently valid range */
	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor) {
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
				__cpufreq_governor(data,
						CPUFREQ_GOV_POLICY_EXIT);
			}

			/* start new governor */
			data->governor = policy->governor;
			if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
					failed = 0;
				else
					/* START failed: unwind the INIT */
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_EXIT);
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(data,
							CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
1756
1da177e4
LT
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 *
 * Rebuilds a policy from the saved user_policy limits and re-applies it
 * via __cpufreq_set_policy(), resynchronizing with the hardware first.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	/* start from the user's requested limits, not the effective ones */
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur && cpufreq_driver->target)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
1811
/*
 * cpufreq_cpu_callback - CPU hotplug notifier
 *
 * Mirrors CPU online/offline transitions onto the cpufreq device
 * interface: add the cpufreq state when a CPU comes up (or a takedown
 * fails), tear it down when one is about to go offline.
 */
static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(dev, NULL);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			__cpufreq_remove_dev(dev, NULL);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			/* takedown aborted: restore the cpufreq state */
			cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}
1837
/* registered from cpufreq_register_driver() once a driver is present */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
1841
1842/*********************************************************************
1843 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1844 *********************************************************************/
1845
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* a usable driver needs verify+init and either setpolicy or target */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* only one driver may be registered at a time */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* triggers cpufreq_add_dev() for every present CPU device */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1917
1918
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	/* only the owner of the registration may unregister */
	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
1946
1947static int __init cpufreq_core_init(void)
1948{
1949 int cpu;
1950
a7b422cd
KRW
1951 if (cpufreq_disabled())
1952 return -ENODEV;
1953
5a01f2e8 1954 for_each_possible_cpu(cpu) {
f1625066 1955 per_cpu(cpufreq_policy_cpu, cpu) = -1;
5a01f2e8
VP
1956 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
1957 }
8aa84ad8 1958
8a25a2fd 1959 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
8aa84ad8 1960 BUG_ON(!cpufreq_global_kobject);
e00e56df 1961 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 1962
5a01f2e8
VP
1963 return 0;
1964}
5a01f2e8 1965core_initcall(cpufreq_core_init);
This page took 0.749684 seconds and 5 git commands to generate.