cpufreq: Add EXPORT_SYMBOL_GPL for have_governor_per_policy
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
1da177e4
LT
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/notifier.h>
24#include <linux/cpufreq.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/device.h>
29#include <linux/slab.h>
30#include <linux/cpu.h>
31#include <linux/completion.h>
3fc54d37 32#include <linux/mutex.h>
e00e56df 33#include <linux/syscore_ops.h>
1da177e4 34
6f4f2723
TR
35#include <trace/events/power.h>
36
1da177e4 37/**
cd878479 38 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
39 * level driver of CPUFreq support, and its spinlock. This lock
40 * also protects the cpufreq_cpu_data array.
41 */
1c3d85dd 42static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 43static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
44#ifdef CONFIG_HOTPLUG_CPU
45/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 47#endif
0d1857a1 48static DEFINE_RWLOCK(cpufreq_driver_lock);
1da177e4 49
5a01f2e8
VP
50/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
5a01f2e8
VP
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 66 */
f1625066 67static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
68static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69
70#define lock_policy_rwsem(mode, cpu) \
fa1d8af4 71static int lock_policy_rwsem_##mode(int cpu) \
5a01f2e8 72{ \
f1625066 73 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
5a01f2e8
VP
74 BUG_ON(policy_cpu == -1); \
75 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8
VP
76 \
77 return 0; \
78}
79
80lock_policy_rwsem(read, cpu);
5a01f2e8 81lock_policy_rwsem(write, cpu);
5a01f2e8 82
fa1d8af4
VK
83#define unlock_policy_rwsem(mode, cpu) \
84static void unlock_policy_rwsem_##mode(int cpu) \
85{ \
86 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
87 BUG_ON(policy_cpu == -1); \
88 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8 89}
5a01f2e8 90
fa1d8af4
VK
91unlock_policy_rwsem(read, cpu);
92unlock_policy_rwsem(write, cpu);
5a01f2e8 93
1da177e4 94/* internal prototypes */
29464f28
DJ
95static int __cpufreq_governor(struct cpufreq_policy *policy,
96 unsigned int event);
5a01f2e8 97static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 98static void handle_update(struct work_struct *work);
1da177e4
LT
99
100/**
32ee8c3e
DJ
101 * Two notifier lists: the "policy" list is involved in the
102 * validation process for a new CPU frequency policy; the
1da177e4
LT
103 * "transition" list for kernel code that needs to handle
104 * changes to devices when the CPU clock speed changes.
105 * The mutex locks both lists.
106 */
e041c683 107static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 108static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 109
74212ca4 110static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
111static int __init init_cpufreq_transition_notifier_list(void)
112{
113 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 114 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
115 return 0;
116}
b3438f82 117pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 118
a7b422cd 119static int off __read_mostly;
da584455 120static int cpufreq_disabled(void)
a7b422cd
KRW
121{
122 return off;
123}
124void disable_cpufreq(void)
125{
126 off = 1;
127}
1da177e4 128static LIST_HEAD(cpufreq_governor_list);
29464f28 129static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 130
4d5dcc42
VK
131bool have_governor_per_policy(void)
132{
1c3d85dd 133 return cpufreq_driver->have_governor_per_policy;
4d5dcc42 134}
3f869d6d 135EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 136
a9144436 137static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
1da177e4
LT
138{
139 struct cpufreq_policy *data;
140 unsigned long flags;
141
7a6aedfa 142 if (cpu >= nr_cpu_ids)
1da177e4
LT
143 goto err_out;
144
145 /* get the cpufreq driver */
1c3d85dd 146 read_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4 147
1c3d85dd 148 if (!cpufreq_driver)
1da177e4
LT
149 goto err_out_unlock;
150
1c3d85dd 151 if (!try_module_get(cpufreq_driver->owner))
1da177e4
LT
152 goto err_out_unlock;
153
154
155 /* get the CPU */
7a6aedfa 156 data = per_cpu(cpufreq_cpu_data, cpu);
1da177e4
LT
157
158 if (!data)
159 goto err_out_put_module;
160
a9144436 161 if (!sysfs && !kobject_get(&data->kobj))
1da177e4
LT
162 goto err_out_put_module;
163
0d1857a1 164 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
165 return data;
166
7d5e350f 167err_out_put_module:
1c3d85dd 168 module_put(cpufreq_driver->owner);
5800043b 169err_out_unlock:
1c3d85dd 170 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
7d5e350f 171err_out:
1da177e4
LT
172 return NULL;
173}
a9144436
SB
174
175struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
176{
d5aaffa9
DB
177 if (cpufreq_disabled())
178 return NULL;
179
a9144436
SB
180 return __cpufreq_cpu_get(cpu, false);
181}
1da177e4
LT
182EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
183
a9144436
SB
184static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
185{
186 return __cpufreq_cpu_get(cpu, true);
187}
188
189static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
190{
191 if (!sysfs)
192 kobject_put(&data->kobj);
1c3d85dd 193 module_put(cpufreq_driver->owner);
a9144436 194}
7d5e350f 195
1da177e4
LT
196void cpufreq_cpu_put(struct cpufreq_policy *data)
197{
d5aaffa9
DB
198 if (cpufreq_disabled())
199 return;
200
a9144436 201 __cpufreq_cpu_put(data, false);
1da177e4
LT
202}
203EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
204
a9144436
SB
205static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
206{
207 __cpufreq_cpu_put(data, true);
208}
1da177e4 209
1da177e4
LT
210/*********************************************************************
211 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
212 *********************************************************************/
213
214/**
215 * adjust_jiffies - adjust the system "loops_per_jiffy"
216 *
217 * This function alters the system "loops_per_jiffy" for the clock
218 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 219 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
220 * per-CPU loops_per_jiffy value wherever possible.
221 */
222#ifndef CONFIG_SMP
223static unsigned long l_p_j_ref;
224static unsigned int l_p_j_ref_freq;
225
858119e1 226static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4
LT
227{
228 if (ci->flags & CPUFREQ_CONST_LOOPS)
229 return;
230
231 if (!l_p_j_ref_freq) {
232 l_p_j_ref = loops_per_jiffy;
233 l_p_j_ref_freq = ci->old;
2d06d8c4 234 pr_debug("saving %lu as reference value for loops_per_jiffy; "
e08f5f5b 235 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
1da177e4 236 }
d08de0c1 237 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
42d4dc3f 238 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
e08f5f5b
GS
239 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
240 ci->new);
2d06d8c4 241 pr_debug("scaling loops_per_jiffy to %lu "
e08f5f5b 242 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
1da177e4
LT
243 }
244}
245#else
e08f5f5b
GS
246static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
247{
248 return;
249}
1da177e4
LT
250#endif
251
252
b43a7ffb
VK
253void __cpufreq_notify_transition(struct cpufreq_policy *policy,
254 struct cpufreq_freqs *freqs, unsigned int state)
1da177e4
LT
255{
256 BUG_ON(irqs_disabled());
257
d5aaffa9
DB
258 if (cpufreq_disabled())
259 return;
260
1c3d85dd 261 freqs->flags = cpufreq_driver->flags;
2d06d8c4 262 pr_debug("notification %u of frequency transition to %u kHz\n",
e4472cb3 263 state, freqs->new);
1da177e4 264
1da177e4 265 switch (state) {
e4472cb3 266
1da177e4 267 case CPUFREQ_PRECHANGE:
32ee8c3e 268 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
269 * which is not equal to what the cpufreq core thinks is
270 * "old frequency".
1da177e4 271 */
1c3d85dd 272 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
273 if ((policy) && (policy->cpu == freqs->cpu) &&
274 (policy->cur) && (policy->cur != freqs->old)) {
2d06d8c4 275 pr_debug("Warning: CPU frequency is"
e4472cb3
DJ
276 " %u, cpufreq assumed %u kHz.\n",
277 freqs->old, policy->cur);
278 freqs->old = policy->cur;
1da177e4
LT
279 }
280 }
b4dfdbb3 281 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 282 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
283 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
284 break;
e4472cb3 285
1da177e4
LT
286 case CPUFREQ_POSTCHANGE:
287 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
2d06d8c4 288 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
6f4f2723 289 (unsigned long)freqs->cpu);
25e41933 290 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 291 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 292 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
293 if (likely(policy) && likely(policy->cpu == freqs->cpu))
294 policy->cur = freqs->new;
1da177e4
LT
295 break;
296 }
1da177e4 297}
b43a7ffb
VK
298/**
299 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
300 * on frequency transition.
301 *
302 * This function calls the transition notifiers and the "adjust_jiffies"
303 * function. It is called twice on all CPU frequency changes that have
304 * external effects.
305 */
306void cpufreq_notify_transition(struct cpufreq_policy *policy,
307 struct cpufreq_freqs *freqs, unsigned int state)
308{
309 for_each_cpu(freqs->cpu, policy->cpus)
310 __cpufreq_notify_transition(policy, freqs, state);
311}
1da177e4
LT
312EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
313
314
315
316/*********************************************************************
317 * SYSFS INTERFACE *
318 *********************************************************************/
319
3bcb09a3
JF
320static struct cpufreq_governor *__find_governor(const char *str_governor)
321{
322 struct cpufreq_governor *t;
323
324 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 325 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
326 return t;
327
328 return NULL;
329}
330
1da177e4
LT
331/**
332 * cpufreq_parse_governor - parse a governor string
333 */
905d77cd 334static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
335 struct cpufreq_governor **governor)
336{
3bcb09a3 337 int err = -EINVAL;
1c3d85dd
RW
338
339 if (!cpufreq_driver)
3bcb09a3
JF
340 goto out;
341
1c3d85dd 342 if (cpufreq_driver->setpolicy) {
1da177e4
LT
343 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
344 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 345 err = 0;
e08f5f5b
GS
346 } else if (!strnicmp(str_governor, "powersave",
347 CPUFREQ_NAME_LEN)) {
1da177e4 348 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 349 err = 0;
1da177e4 350 }
1c3d85dd 351 } else if (cpufreq_driver->target) {
1da177e4 352 struct cpufreq_governor *t;
3bcb09a3 353
3fc54d37 354 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
355
356 t = __find_governor(str_governor);
357
ea714970 358 if (t == NULL) {
1a8e1463 359 int ret;
ea714970 360
1a8e1463
KC
361 mutex_unlock(&cpufreq_governor_mutex);
362 ret = request_module("cpufreq_%s", str_governor);
363 mutex_lock(&cpufreq_governor_mutex);
ea714970 364
1a8e1463
KC
365 if (ret == 0)
366 t = __find_governor(str_governor);
ea714970
JF
367 }
368
3bcb09a3
JF
369 if (t != NULL) {
370 *governor = t;
371 err = 0;
1da177e4 372 }
3bcb09a3 373
3fc54d37 374 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 375 }
29464f28 376out:
3bcb09a3 377 return err;
1da177e4 378}
1da177e4
LT
379
380
1da177e4 381/**
e08f5f5b
GS
382 * cpufreq_per_cpu_attr_read() / show_##file_name() -
383 * print out cpufreq information
1da177e4
LT
384 *
385 * Write out information from cpufreq_driver->policy[cpu]; object must be
386 * "unsigned int".
387 */
388
32ee8c3e
DJ
389#define show_one(file_name, object) \
390static ssize_t show_##file_name \
905d77cd 391(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 392{ \
29464f28 393 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
394}
395
396show_one(cpuinfo_min_freq, cpuinfo.min_freq);
397show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 398show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
399show_one(scaling_min_freq, min);
400show_one(scaling_max_freq, max);
401show_one(scaling_cur_freq, cur);
402
e08f5f5b
GS
403static int __cpufreq_set_policy(struct cpufreq_policy *data,
404 struct cpufreq_policy *policy);
7970e08b 405
1da177e4
LT
406/**
407 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
408 */
409#define store_one(file_name, object) \
410static ssize_t store_##file_name \
905d77cd 411(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 412{ \
f55c9c26 413 unsigned int ret; \
1da177e4
LT
414 struct cpufreq_policy new_policy; \
415 \
416 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
417 if (ret) \
418 return -EINVAL; \
419 \
29464f28 420 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
421 if (ret != 1) \
422 return -EINVAL; \
423 \
7970e08b
TR
424 ret = __cpufreq_set_policy(policy, &new_policy); \
425 policy->user_policy.object = policy->object; \
1da177e4
LT
426 \
427 return ret ? ret : count; \
428}
429
29464f28
DJ
430store_one(scaling_min_freq, min);
431store_one(scaling_max_freq, max);
1da177e4
LT
432
433/**
434 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
435 */
905d77cd
DJ
436static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
437 char *buf)
1da177e4 438{
5a01f2e8 439 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
440 if (!cur_freq)
441 return sprintf(buf, "<unknown>");
442 return sprintf(buf, "%u\n", cur_freq);
443}
444
445
446/**
447 * show_scaling_governor - show the current policy for the specified CPU
448 */
905d77cd 449static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 450{
29464f28 451 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
452 return sprintf(buf, "powersave\n");
453 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
454 return sprintf(buf, "performance\n");
455 else if (policy->governor)
4b972f0b 456 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 457 policy->governor->name);
1da177e4
LT
458 return -EINVAL;
459}
460
461
462/**
463 * store_scaling_governor - store policy for the specified CPU
464 */
905d77cd
DJ
465static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
466 const char *buf, size_t count)
1da177e4 467{
f55c9c26 468 unsigned int ret;
1da177e4
LT
469 char str_governor[16];
470 struct cpufreq_policy new_policy;
471
472 ret = cpufreq_get_policy(&new_policy, policy->cpu);
473 if (ret)
474 return ret;
475
29464f28 476 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
477 if (ret != 1)
478 return -EINVAL;
479
e08f5f5b
GS
480 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
481 &new_policy.governor))
1da177e4
LT
482 return -EINVAL;
483
7970e08b
TR
484 /* Do not use cpufreq_set_policy here or the user_policy.max
485 will be wrongly overridden */
7970e08b
TR
486 ret = __cpufreq_set_policy(policy, &new_policy);
487
488 policy->user_policy.policy = policy->policy;
489 policy->user_policy.governor = policy->governor;
7970e08b 490
e08f5f5b
GS
491 if (ret)
492 return ret;
493 else
494 return count;
1da177e4
LT
495}
496
497/**
498 * show_scaling_driver - show the cpufreq driver currently loaded
499 */
905d77cd 500static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 501{
1c3d85dd 502 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
1da177e4
LT
503}
504
505/**
506 * show_scaling_available_governors - show the available CPUfreq governors
507 */
905d77cd
DJ
508static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
509 char *buf)
1da177e4
LT
510{
511 ssize_t i = 0;
512 struct cpufreq_governor *t;
513
1c3d85dd 514 if (!cpufreq_driver->target) {
1da177e4
LT
515 i += sprintf(buf, "performance powersave");
516 goto out;
517 }
518
519 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
520 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
521 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 522 goto out;
4b972f0b 523 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 524 }
7d5e350f 525out:
1da177e4
LT
526 i += sprintf(&buf[i], "\n");
527 return i;
528}
e8628dd0 529
835481d9 530static ssize_t show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
531{
532 ssize_t i = 0;
533 unsigned int cpu;
534
835481d9 535 for_each_cpu(cpu, mask) {
1da177e4
LT
536 if (i)
537 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
538 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
539 if (i >= (PAGE_SIZE - 5))
29464f28 540 break;
1da177e4
LT
541 }
542 i += sprintf(&buf[i], "\n");
543 return i;
544}
545
e8628dd0
DW
546/**
547 * show_related_cpus - show the CPUs affected by each transition even if
548 * hw coordination is in use
549 */
550static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
551{
e8628dd0
DW
552 return show_cpus(policy->related_cpus, buf);
553}
554
555/**
556 * show_affected_cpus - show the CPUs affected by each transition
557 */
558static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
559{
560 return show_cpus(policy->cpus, buf);
561}
562
9e76988e 563static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 564 const char *buf, size_t count)
9e76988e
VP
565{
566 unsigned int freq = 0;
567 unsigned int ret;
568
879000f9 569 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
570 return -EINVAL;
571
572 ret = sscanf(buf, "%u", &freq);
573 if (ret != 1)
574 return -EINVAL;
575
576 policy->governor->store_setspeed(policy, freq);
577
578 return count;
579}
580
581static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
582{
879000f9 583 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
584 return sprintf(buf, "<unsupported>\n");
585
586 return policy->governor->show_setspeed(policy, buf);
587}
1da177e4 588
e2f74f35 589/**
8bf1ac72 590 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
591 */
592static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
593{
594 unsigned int limit;
595 int ret;
1c3d85dd
RW
596 if (cpufreq_driver->bios_limit) {
597 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
e2f74f35
TR
598 if (!ret)
599 return sprintf(buf, "%u\n", limit);
600 }
601 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
602}
603
6dad2a29
BP
604cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
605cpufreq_freq_attr_ro(cpuinfo_min_freq);
606cpufreq_freq_attr_ro(cpuinfo_max_freq);
607cpufreq_freq_attr_ro(cpuinfo_transition_latency);
608cpufreq_freq_attr_ro(scaling_available_governors);
609cpufreq_freq_attr_ro(scaling_driver);
610cpufreq_freq_attr_ro(scaling_cur_freq);
611cpufreq_freq_attr_ro(bios_limit);
612cpufreq_freq_attr_ro(related_cpus);
613cpufreq_freq_attr_ro(affected_cpus);
614cpufreq_freq_attr_rw(scaling_min_freq);
615cpufreq_freq_attr_rw(scaling_max_freq);
616cpufreq_freq_attr_rw(scaling_governor);
617cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 618
905d77cd 619static struct attribute *default_attrs[] = {
1da177e4
LT
620 &cpuinfo_min_freq.attr,
621 &cpuinfo_max_freq.attr,
ed129784 622 &cpuinfo_transition_latency.attr,
1da177e4
LT
623 &scaling_min_freq.attr,
624 &scaling_max_freq.attr,
625 &affected_cpus.attr,
e8628dd0 626 &related_cpus.attr,
1da177e4
LT
627 &scaling_governor.attr,
628 &scaling_driver.attr,
629 &scaling_available_governors.attr,
9e76988e 630 &scaling_setspeed.attr,
1da177e4
LT
631 NULL
632};
633
8aa84ad8
TR
/* /sys/devices/system/cpu/cpufreq parent kobject, shared with governors. */
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 639
29464f28 640static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 641{
905d77cd
DJ
642 struct cpufreq_policy *policy = to_policy(kobj);
643 struct freq_attr *fattr = to_attr(attr);
0db4a8a9 644 ssize_t ret = -EINVAL;
a9144436 645 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 646 if (!policy)
0db4a8a9 647 goto no_policy;
5a01f2e8
VP
648
649 if (lock_policy_rwsem_read(policy->cpu) < 0)
0db4a8a9 650 goto fail;
5a01f2e8 651
e08f5f5b
GS
652 if (fattr->show)
653 ret = fattr->show(policy, buf);
654 else
655 ret = -EIO;
656
5a01f2e8 657 unlock_policy_rwsem_read(policy->cpu);
0db4a8a9 658fail:
a9144436 659 cpufreq_cpu_put_sysfs(policy);
0db4a8a9 660no_policy:
1da177e4
LT
661 return ret;
662}
663
905d77cd
DJ
664static ssize_t store(struct kobject *kobj, struct attribute *attr,
665 const char *buf, size_t count)
1da177e4 666{
905d77cd
DJ
667 struct cpufreq_policy *policy = to_policy(kobj);
668 struct freq_attr *fattr = to_attr(attr);
a07530b4 669 ssize_t ret = -EINVAL;
a9144436 670 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 671 if (!policy)
a07530b4 672 goto no_policy;
5a01f2e8
VP
673
674 if (lock_policy_rwsem_write(policy->cpu) < 0)
a07530b4 675 goto fail;
5a01f2e8 676
e08f5f5b
GS
677 if (fattr->store)
678 ret = fattr->store(policy, buf, count);
679 else
680 ret = -EIO;
681
5a01f2e8 682 unlock_policy_rwsem_write(policy->cpu);
a07530b4 683fail:
a9144436 684 cpufreq_cpu_put_sysfs(policy);
a07530b4 685no_policy:
1da177e4
LT
686 return ret;
687}
688
905d77cd 689static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 690{
905d77cd 691 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 692 pr_debug("last reference is dropped\n");
1da177e4
LT
693 complete(&policy->kobj_unregister);
694}
695
52cf25d0 696static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
697 .show = show,
698 .store = store,
699};
700
701static struct kobj_type ktype_cpufreq = {
702 .sysfs_ops = &sysfs_ops,
703 .default_attrs = default_attrs,
704 .release = cpufreq_sysfs_release,
705};
706
19d6f7ec 707/* symlink affected CPUs */
cf3289d0
AC
708static int cpufreq_add_dev_symlink(unsigned int cpu,
709 struct cpufreq_policy *policy)
19d6f7ec
DJ
710{
711 unsigned int j;
712 int ret = 0;
713
714 for_each_cpu(j, policy->cpus) {
715 struct cpufreq_policy *managed_policy;
8a25a2fd 716 struct device *cpu_dev;
19d6f7ec
DJ
717
718 if (j == cpu)
719 continue;
19d6f7ec 720
2d06d8c4 721 pr_debug("CPU %u already managed, adding link\n", j);
19d6f7ec 722 managed_policy = cpufreq_cpu_get(cpu);
8a25a2fd
KS
723 cpu_dev = get_cpu_device(j);
724 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
19d6f7ec
DJ
725 "cpufreq");
726 if (ret) {
727 cpufreq_cpu_put(managed_policy);
728 return ret;
729 }
730 }
731 return ret;
732}
733
cf3289d0
AC
734static int cpufreq_add_dev_interface(unsigned int cpu,
735 struct cpufreq_policy *policy,
8a25a2fd 736 struct device *dev)
909a694e 737{
ecf7e461 738 struct cpufreq_policy new_policy;
909a694e
DJ
739 struct freq_attr **drv_attr;
740 unsigned long flags;
741 int ret = 0;
742 unsigned int j;
743
744 /* prepare interface data */
745 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
8a25a2fd 746 &dev->kobj, "cpufreq");
909a694e
DJ
747 if (ret)
748 return ret;
749
750 /* set up files for this cpu device */
1c3d85dd 751 drv_attr = cpufreq_driver->attr;
909a694e
DJ
752 while ((drv_attr) && (*drv_attr)) {
753 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
754 if (ret)
1c3d85dd 755 goto err_out_kobj_put;
909a694e
DJ
756 drv_attr++;
757 }
1c3d85dd 758 if (cpufreq_driver->get) {
909a694e
DJ
759 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
760 if (ret)
1c3d85dd 761 goto err_out_kobj_put;
909a694e 762 }
1c3d85dd 763 if (cpufreq_driver->target) {
909a694e
DJ
764 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
765 if (ret)
1c3d85dd 766 goto err_out_kobj_put;
909a694e 767 }
1c3d85dd 768 if (cpufreq_driver->bios_limit) {
e2f74f35
TR
769 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
770 if (ret)
1c3d85dd 771 goto err_out_kobj_put;
e2f74f35 772 }
909a694e 773
0d1857a1 774 write_lock_irqsave(&cpufreq_driver_lock, flags);
909a694e 775 for_each_cpu(j, policy->cpus) {
909a694e 776 per_cpu(cpufreq_cpu_data, j) = policy;
f1625066 777 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
909a694e 778 }
0d1857a1 779 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
909a694e
DJ
780
781 ret = cpufreq_add_dev_symlink(cpu, policy);
ecf7e461
DJ
782 if (ret)
783 goto err_out_kobj_put;
784
785 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
786 /* assure that the starting sequence is run in __cpufreq_set_policy */
787 policy->governor = NULL;
788
789 /* set default policy */
790 ret = __cpufreq_set_policy(policy, &new_policy);
791 policy->user_policy.policy = policy->policy;
792 policy->user_policy.governor = policy->governor;
793
794 if (ret) {
2d06d8c4 795 pr_debug("setting policy failed\n");
1c3d85dd
RW
796 if (cpufreq_driver->exit)
797 cpufreq_driver->exit(policy);
ecf7e461 798 }
909a694e
DJ
799 return ret;
800
801err_out_kobj_put:
802 kobject_put(&policy->kobj);
803 wait_for_completion(&policy->kobj_unregister);
804 return ret;
805}
806
fcf80582
VK
#ifdef CONFIG_HOTPLUG_CPU
/*
 * cpufreq_add_policy_cpu - attach @cpu to the existing policy of
 * @sibling: stop the governor, publish the cpu in the policy's masks and
 * the per-cpu tables, restart the governor, and symlink the sysfs dir.
 */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	if (has_target)
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	if (has_target) {
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif
1da177e4
LT
846
847/**
848 * cpufreq_add_dev - add a CPU device
849 *
32ee8c3e 850 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
851 *
852 * The Oracle says: try running cpufreq registration/unregistration concurrently
853 * with with cpu hotplugging and all hell will break loose. Tried to clean this
854 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 855 */
8a25a2fd 856static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 857{
fcf80582 858 unsigned int j, cpu = dev->id;
65922465 859 int ret = -ENOMEM;
1da177e4 860 struct cpufreq_policy *policy;
1da177e4 861 unsigned long flags;
90e41bac 862#ifdef CONFIG_HOTPLUG_CPU
fcf80582 863 struct cpufreq_governor *gov;
90e41bac
PB
864 int sibling;
865#endif
1da177e4 866
c32b6b8e
AR
867 if (cpu_is_offline(cpu))
868 return 0;
869
2d06d8c4 870 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
871
872#ifdef CONFIG_SMP
873 /* check whether a different CPU already registered this
874 * CPU because it is in the same boat. */
875 policy = cpufreq_cpu_get(cpu);
876 if (unlikely(policy)) {
8ff69732 877 cpufreq_cpu_put(policy);
1da177e4
LT
878 return 0;
879 }
fcf80582
VK
880
881#ifdef CONFIG_HOTPLUG_CPU
882 /* Check if this cpu was hot-unplugged earlier and has siblings */
0d1857a1 883 read_lock_irqsave(&cpufreq_driver_lock, flags);
fcf80582
VK
884 for_each_online_cpu(sibling) {
885 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
2eaa3e2d 886 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
0d1857a1 887 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 888 return cpufreq_add_policy_cpu(cpu, sibling, dev);
2eaa3e2d 889 }
fcf80582 890 }
0d1857a1 891 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 892#endif
1da177e4
LT
893#endif
894
1c3d85dd 895 if (!try_module_get(cpufreq_driver->owner)) {
1da177e4
LT
896 ret = -EINVAL;
897 goto module_out;
898 }
899
e98df50c 900 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
059019a3 901 if (!policy)
1da177e4 902 goto nomem_out;
059019a3
DJ
903
904 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
3f4a782b 905 goto err_free_policy;
059019a3
DJ
906
907 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
3f4a782b 908 goto err_free_cpumask;
1da177e4
LT
909
910 policy->cpu = cpu;
65922465 911 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
835481d9 912 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 913
5a01f2e8 914 /* Initially set CPU itself as the policy_cpu */
f1625066 915 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
5a01f2e8 916
1da177e4 917 init_completion(&policy->kobj_unregister);
65f27f38 918 INIT_WORK(&policy->update, handle_update);
1da177e4
LT
919
920 /* call driver. From then on the cpufreq must be able
921 * to accept all calls to ->verify and ->setpolicy for this CPU
922 */
1c3d85dd 923 ret = cpufreq_driver->init(policy);
1da177e4 924 if (ret) {
2d06d8c4 925 pr_debug("initialization failed\n");
2eaa3e2d 926 goto err_set_policy_cpu;
1da177e4 927 }
643ae6e8 928
fcf80582
VK
929 /* related cpus should atleast have policy->cpus */
930 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
931
643ae6e8
VK
932 /*
933 * affected cpus must always be the one, which are online. We aren't
934 * managing offline cpus here.
935 */
936 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
937
187d9f4e
MC
938 policy->user_policy.min = policy->min;
939 policy->user_policy.max = policy->max;
1da177e4 940
a1531acd
TR
941 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
942 CPUFREQ_START, policy);
943
fcf80582
VK
944#ifdef CONFIG_HOTPLUG_CPU
945 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
946 if (gov) {
947 policy->governor = gov;
948 pr_debug("Restoring governor %s for cpu %d\n",
949 policy->governor->name, cpu);
4bfa042c 950 }
fcf80582 951#endif
1da177e4 952
8a25a2fd 953 ret = cpufreq_add_dev_interface(cpu, policy, dev);
19d6f7ec
DJ
954 if (ret)
955 goto err_out_unregister;
8ff69732 956
038c5b3e 957 kobject_uevent(&policy->kobj, KOBJ_ADD);
1c3d85dd 958 module_put(cpufreq_driver->owner);
2d06d8c4 959 pr_debug("initialization complete\n");
87c32271 960
1da177e4
LT
961 return 0;
962
1da177e4 963err_out_unregister:
0d1857a1 964 write_lock_irqsave(&cpufreq_driver_lock, flags);
835481d9 965 for_each_cpu(j, policy->cpus)
7a6aedfa 966 per_cpu(cpufreq_cpu_data, j) = NULL;
0d1857a1 967 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 968
c10997f6 969 kobject_put(&policy->kobj);
1da177e4
LT
970 wait_for_completion(&policy->kobj_unregister);
971
2eaa3e2d
VK
972err_set_policy_cpu:
973 per_cpu(cpufreq_policy_cpu, cpu) = -1;
cad70a6a 974 free_cpumask_var(policy->related_cpus);
3f4a782b
MD
975err_free_cpumask:
976 free_cpumask_var(policy->cpus);
977err_free_policy:
1da177e4 978 kfree(policy);
1da177e4 979nomem_out:
1c3d85dd 980 module_put(cpufreq_driver->owner);
c32b6b8e 981module_out:
1da177e4
LT
982 return ret;
983}
984
b8eed8af
VK
985static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
986{
987 int j;
988
989 policy->last_cpu = policy->cpu;
990 policy->cpu = cpu;
991
3361b7b1 992 for_each_cpu(j, policy->cpus)
b8eed8af 993 per_cpu(cpufreq_policy_cpu, j) = cpu;
b8eed8af
VK
994
995#ifdef CONFIG_CPU_FREQ_TABLE
996 cpufreq_frequency_table_update_policy_cpu(policy);
997#endif
998 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
999 CPUFREQ_UPDATE_POLICY_CPU, policy);
1000}
1da177e4
LT
1001
1002/**
5a01f2e8 1003 * __cpufreq_remove_dev - remove a CPU device
1da177e4
LT
1004 *
1005 * Removes the cpufreq interface for a CPU device.
5a01f2e8
VP
1006 * Caller should already have policy_rwsem in write mode for this CPU.
1007 * This routine frees the rwsem before returning.
1da177e4 1008 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, ret, cpus;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
	struct device *cpu_dev;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	/* Atomically detach this CPU's policy pointer under the driver lock. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	data = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!data) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	/* Stop the governor before the CPU disappears from the policy. */
	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Remember the governor name so it can be restored if the CPU comes
	 * back online.  NOTE(review): strncpy does not guarantee a NUL
	 * terminator when the name is exactly CPUFREQ_NAME_LEN long —
	 * presumably governor names are always shorter; confirm.
	 */
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			data->governor->name, CPUFREQ_NAME_LEN);
#endif

	/* Drop this CPU from the policy mask (unless it is the last one). */
	WARN_ON(lock_policy_rwsem_write(cpu));
	cpus = cpumask_weight(data->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, data->cpus);
	unlock_policy_rwsem_write(cpu);

	if (cpu != data->cpu) {
		/* Non-owner CPU: only its sysfs symlink needs to go. */
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* first sibling now owns the new sysfs dir */
		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
		if (ret) {
			pr_err("%s: Failed to move kobj: %d", __func__, ret);

			/* Roll back: re-add the CPU to the mask and map. */
			WARN_ON(lock_policy_rwsem_write(cpu));
			cpumask_set_cpu(cpu, data->cpus);

			write_lock_irqsave(&cpufreq_driver_lock, flags);
			per_cpu(cpufreq_cpu_data, cpu) = data;
			write_unlock_irqrestore(&cpufreq_driver_lock, flags);

			unlock_policy_rwsem_write(cpu);

			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
					"cpufreq");
			return -EINVAL;
		}

		/* Hand policy ownership to the first remaining sibling. */
		WARN_ON(lock_policy_rwsem_write(cpu));
		update_policy_cpu(data, cpu_dev->id);
		unlock_policy_rwsem_write(cpu);
		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				__func__, cpu_dev->id, cpu);
	}

	/* Last CPU in the policy: tear down the governor's policy state. */
	if ((cpus == 1) && (cpufreq_driver->target))
		__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);

	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
	cpufreq_cpu_put(data);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		lock_policy_rwsem_read(cpu);
		kobj = &data->kobj;
		cmp = &data->kobj_unregister;
		unlock_policy_rwsem_read(cpu);
		kobject_put(kobj);

		/* we need to make sure that the underlying kobj is actually
		 * not referenced anymore by anybody before we proceed with
		 * unloading.
		 */
		pr_debug("waiting for dropping of refcount\n");
		wait_for_completion(cmp);
		pr_debug("wait complete\n");

		if (cpufreq_driver->exit)
			cpufreq_driver->exit(data);

		free_cpumask_var(data->related_cpus);
		free_cpumask_var(data->cpus);
		kfree(data);
	} else if (cpufreq_driver->target) {
		/* Siblings remain: restart the governor on them. */
		__cpufreq_governor(data, CPUFREQ_GOV_START);
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	return 0;
}
1115
1116
8a25a2fd 1117static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1118{
8a25a2fd 1119 unsigned int cpu = dev->id;
5a01f2e8 1120 int retval;
ec28297a
VP
1121
1122 if (cpu_is_offline(cpu))
1123 return 0;
1124
8a25a2fd 1125 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1126 return retval;
1127}
1128
1129
65f27f38 1130static void handle_update(struct work_struct *work)
1da177e4 1131{
65f27f38
DH
1132 struct cpufreq_policy *policy =
1133 container_of(work, struct cpufreq_policy, update);
1134 unsigned int cpu = policy->cpu;
2d06d8c4 1135 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1136 cpufreq_update_policy(cpu);
1137}
1138
1139/**
1140 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1141 * @cpu: cpu number
1142 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1143 * @new_freq: CPU frequency the CPU actually runs at
1144 *
29464f28
DJ
1145 * We adjust to current frequency first, and need to clean up later.
1146 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1147 */
e08f5f5b
GS
1148static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1149 unsigned int new_freq)
1da177e4 1150{
b43a7ffb 1151 struct cpufreq_policy *policy;
1da177e4 1152 struct cpufreq_freqs freqs;
b43a7ffb
VK
1153 unsigned long flags;
1154
1da177e4 1155
2d06d8c4 1156 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1157 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1158
1da177e4
LT
1159 freqs.old = old_freq;
1160 freqs.new = new_freq;
b43a7ffb
VK
1161
1162 read_lock_irqsave(&cpufreq_driver_lock, flags);
1163 policy = per_cpu(cpufreq_cpu_data, cpu);
1164 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1165
1166 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1167 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1da177e4
LT
1168}
1169
1170
32ee8c3e 1171/**
4ab70df4 1172 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1173 * @cpu: CPU number
1174 *
1175 * This is the last known freq, without actually getting it from the driver.
1176 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1177 */
1178unsigned int cpufreq_quick_get(unsigned int cpu)
1179{
9e21ba8b 1180 struct cpufreq_policy *policy;
e08f5f5b 1181 unsigned int ret_freq = 0;
95235ca2 1182
1c3d85dd
RW
1183 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1184 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1185
1186 policy = cpufreq_cpu_get(cpu);
95235ca2 1187 if (policy) {
e08f5f5b 1188 ret_freq = policy->cur;
95235ca2
VP
1189 cpufreq_cpu_put(policy);
1190 }
1191
4d34a67d 1192 return ret_freq;
95235ca2
VP
1193}
1194EXPORT_SYMBOL(cpufreq_quick_get);
1195
3d737108
JB
1196/**
1197 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1198 * @cpu: CPU number
1199 *
1200 * Just return the max possible frequency for a given CPU.
1201 */
1202unsigned int cpufreq_quick_get_max(unsigned int cpu)
1203{
1204 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1205 unsigned int ret_freq = 0;
1206
1207 if (policy) {
1208 ret_freq = policy->max;
1209 cpufreq_cpu_put(policy);
1210 }
1211
1212 return ret_freq;
1213}
1214EXPORT_SYMBOL(cpufreq_quick_get_max);
1215
95235ca2 1216
5a01f2e8 1217static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1218{
7a6aedfa 1219 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1220 unsigned int ret_freq = 0;
5800043b 1221
1c3d85dd 1222 if (!cpufreq_driver->get)
4d34a67d 1223 return ret_freq;
1da177e4 1224
1c3d85dd 1225 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1226
e08f5f5b 1227 if (ret_freq && policy->cur &&
1c3d85dd 1228 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e08f5f5b
GS
1229 /* verify no discrepancy between actual and
1230 saved value exists */
1231 if (unlikely(ret_freq != policy->cur)) {
1232 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1233 schedule_work(&policy->update);
1234 }
1235 }
1236
4d34a67d 1237 return ret_freq;
5a01f2e8 1238}
1da177e4 1239
5a01f2e8
VP
1240/**
1241 * cpufreq_get - get the current CPU frequency (in kHz)
1242 * @cpu: CPU number
1243 *
1244 * Get the CPU current (static) CPU frequency
1245 */
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return 0;

	/* Query only while holding the policy read lock. */
	if (likely(!lock_policy_rwsem_read(cpu))) {
		freq = __cpufreq_get(cpu);
		unlock_policy_rwsem_read(cpu);
	}

	cpufreq_cpu_put(policy);
	return freq;
}
EXPORT_SYMBOL(cpufreq_get);
1267
8a25a2fd
KS
/* Hooks the cpufreq add/remove handlers into the generic cpu subsystem. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1274
1da177e4 1275
42d4dc3f 1276/**
e00e56df
RW
1277 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1278 *
1279 * This function is only executed for the boot processor. The other CPUs
1280 * have been put offline by means of CPU hotplug.
42d4dc3f 1281 */
e00e56df 1282static int cpufreq_bp_suspend(void)
42d4dc3f 1283{
e08f5f5b 1284 int ret = 0;
4bc5d341 1285
e00e56df 1286 int cpu = smp_processor_id();
42d4dc3f
BH
1287 struct cpufreq_policy *cpu_policy;
1288
2d06d8c4 1289 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1290
e00e56df 1291 /* If there's no policy for the boot CPU, we have nothing to do. */
42d4dc3f
BH
1292 cpu_policy = cpufreq_cpu_get(cpu);
1293 if (!cpu_policy)
e00e56df 1294 return 0;
42d4dc3f 1295
1c3d85dd
RW
1296 if (cpufreq_driver->suspend) {
1297 ret = cpufreq_driver->suspend(cpu_policy);
ce6c3997 1298 if (ret)
42d4dc3f
BH
1299 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1300 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1301 }
1302
42d4dc3f 1303 cpufreq_cpu_put(cpu_policy);
c9060494 1304 return ret;
42d4dc3f
BH
1305}
1306
1da177e4 1307/**
e00e56df 1308 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1309 *
1310 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1311 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1312 * restored. It will verify that the current freq is in sync with
1313 * what we believe it to be. This is a bit later than when it
1314 * should be, but nonethteless it's better than calling
1315 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1316 *
1317 * This function is only executed for the boot CPU. The other CPUs have not
1318 * been turned on yet.
1da177e4 1319 */
e00e56df 1320static void cpufreq_bp_resume(void)
1da177e4 1321{
e08f5f5b 1322 int ret = 0;
4bc5d341 1323
e00e56df 1324 int cpu = smp_processor_id();
1da177e4
LT
1325 struct cpufreq_policy *cpu_policy;
1326
2d06d8c4 1327 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1328
e00e56df 1329 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1330 cpu_policy = cpufreq_cpu_get(cpu);
1331 if (!cpu_policy)
e00e56df 1332 return;
1da177e4 1333
1c3d85dd
RW
1334 if (cpufreq_driver->resume) {
1335 ret = cpufreq_driver->resume(cpu_policy);
1da177e4
LT
1336 if (ret) {
1337 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1338 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1339 goto fail;
1da177e4
LT
1340 }
1341 }
1342
1da177e4 1343 schedule_work(&cpu_policy->update);
ce6c3997 1344
c9060494 1345fail:
1da177e4 1346 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1347}
1348
e00e56df
RW
/* Boot-CPU suspend/resume callbacks, run when only the boot CPU is online. */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1353
9d95046e
BP
1354/**
1355 * cpufreq_get_current_driver - return current driver's name
1356 *
1357 * Return the name string of the currently loaded cpufreq driver
1358 * or NULL, if none.
1359 */
1360const char *cpufreq_get_current_driver(void)
1361{
1c3d85dd
RW
1362 if (cpufreq_driver)
1363 return cpufreq_driver->name;
1364
1365 return NULL;
9d95046e
BP
1366}
1367EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1368
1369/*********************************************************************
1370 * NOTIFIER LISTS INTERFACE *
1371 *********************************************************************/
1372
1373/**
1374 * cpufreq_register_notifier - register a driver with cpufreq
1375 * @nb: notifier function to register
1376 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1377 *
32ee8c3e 1378 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1379 * are notified about clock rate changes (once before and once after
1380 * the transition), or a list of drivers that are notified about
1381 * changes in cpufreq policy.
1382 *
1383 * This function may sleep, and has the same return conditions as
e041c683 1384 * blocking_notifier_chain_register.
1da177e4
LT
1385 */
1386int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1387{
1388 int ret;
1389
d5aaffa9
DB
1390 if (cpufreq_disabled())
1391 return -EINVAL;
1392
74212ca4
CEB
1393 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1394
1da177e4
LT
1395 switch (list) {
1396 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1397 ret = srcu_notifier_chain_register(
e041c683 1398 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1399 break;
1400 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1401 ret = blocking_notifier_chain_register(
1402 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1403 break;
1404 default:
1405 ret = -EINVAL;
1406 }
1da177e4
LT
1407
1408 return ret;
1409}
1410EXPORT_SYMBOL(cpufreq_register_notifier);
1411
1412
1413/**
1414 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1415 * @nb: notifier block to be unregistered
1416 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1417 *
1418 * Remove a driver from the CPU frequency notifier list.
1419 *
1420 * This function may sleep, and has the same return conditions as
e041c683 1421 * blocking_notifier_chain_unregister.
1da177e4
LT
1422 */
1423int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1424{
1425 int ret;
1426
d5aaffa9
DB
1427 if (cpufreq_disabled())
1428 return -EINVAL;
1429
1da177e4
LT
1430 switch (list) {
1431 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1432 ret = srcu_notifier_chain_unregister(
e041c683 1433 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1434 break;
1435 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1436 ret = blocking_notifier_chain_unregister(
1437 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1438 break;
1439 default:
1440 ret = -EINVAL;
1441 }
1da177e4
LT
1442
1443 return ret;
1444}
1445EXPORT_SYMBOL(cpufreq_unregister_notifier);
1446
1447
1448/*********************************************************************
1449 * GOVERNORS *
1450 *********************************************************************/
1451
1452
1453int __cpufreq_driver_target(struct cpufreq_policy *policy,
1454 unsigned int target_freq,
1455 unsigned int relation)
1456{
1457 int retval = -EINVAL;
7249924e 1458 unsigned int old_target_freq = target_freq;
c32b6b8e 1459
a7b422cd
KRW
1460 if (cpufreq_disabled())
1461 return -ENODEV;
1462
7249924e
VK
1463 /* Make sure that target_freq is within supported range */
1464 if (target_freq > policy->max)
1465 target_freq = policy->max;
1466 if (target_freq < policy->min)
1467 target_freq = policy->min;
1468
1469 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1470 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1471
1472 if (target_freq == policy->cur)
1473 return 0;
1474
1c3d85dd
RW
1475 if (cpufreq_driver->target)
1476 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1477
1da177e4
LT
1478 return retval;
1479}
1480EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1481
1da177e4
LT
1482int cpufreq_driver_target(struct cpufreq_policy *policy,
1483 unsigned int target_freq,
1484 unsigned int relation)
1485{
f1829e4a 1486 int ret = -EINVAL;
1da177e4
LT
1487
1488 policy = cpufreq_cpu_get(policy->cpu);
1489 if (!policy)
f1829e4a 1490 goto no_policy;
1da177e4 1491
5a01f2e8 1492 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1493 goto fail;
1da177e4
LT
1494
1495 ret = __cpufreq_driver_target(policy, target_freq, relation);
1496
5a01f2e8 1497 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1498
f1829e4a 1499fail:
1da177e4 1500 cpufreq_cpu_put(policy);
f1829e4a 1501no_policy:
1da177e4
LT
1502 return ret;
1503}
1504EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1505
bf0b90e3 1506int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62
VP
1507{
1508 int ret = 0;
1509
d5aaffa9
DB
1510 if (cpufreq_disabled())
1511 return ret;
1512
1c3d85dd 1513 if (!cpufreq_driver->getavg)
0676f7f2
VK
1514 return 0;
1515
dfde5d62
VP
1516 policy = cpufreq_cpu_get(policy->cpu);
1517 if (!policy)
1518 return -EINVAL;
1519
1c3d85dd 1520 ret = cpufreq_driver->getavg(policy, cpu);
dfde5d62 1521
dfde5d62
VP
1522 cpufreq_cpu_put(policy);
1523 return ret;
1524}
5a01f2e8 1525EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1526
153d7f3f 1527/*
153d7f3f
AV
1528 * when "event" is CPUFREQ_GOV_LIMITS
1529 */
1da177e4 1530
e08f5f5b
GS
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	*/
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/*
	 * If the hardware's transition latency exceeds what the requested
	 * governor can tolerate, fall back to the performance governor
	 * (when built in) or fail.
	 */
	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	/* Pin the governor module while it handles the event. */
	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* Track how many policies have this governor initialized. */
	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	}

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	/* A successful GOV_START keeps the reference; a successful GOV_STOP
	 * additionally drops the reference taken at start time. */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
1584
1585
1da177e4
LT
1586int cpufreq_register_governor(struct cpufreq_governor *governor)
1587{
3bcb09a3 1588 int err;
1da177e4
LT
1589
1590 if (!governor)
1591 return -EINVAL;
1592
a7b422cd
KRW
1593 if (cpufreq_disabled())
1594 return -ENODEV;
1595
3fc54d37 1596 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1597
b394058f 1598 governor->initialized = 0;
3bcb09a3
JF
1599 err = -EBUSY;
1600 if (__find_governor(governor->name) == NULL) {
1601 err = 0;
1602 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1603 }
1da177e4 1604
32ee8c3e 1605 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1606 return err;
1da177e4
LT
1607}
1608EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1609
1610
1611void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1612{
90e41bac
PB
1613#ifdef CONFIG_HOTPLUG_CPU
1614 int cpu;
1615#endif
1616
1da177e4
LT
1617 if (!governor)
1618 return;
1619
a7b422cd
KRW
1620 if (cpufreq_disabled())
1621 return;
1622
90e41bac
PB
1623#ifdef CONFIG_HOTPLUG_CPU
1624 for_each_present_cpu(cpu) {
1625 if (cpu_online(cpu))
1626 continue;
1627 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1628 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1629 }
1630#endif
1631
3fc54d37 1632 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1633 list_del(&governor->governor_list);
3fc54d37 1634 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1635 return;
1636}
1637EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1638
1639
1640
1641/*********************************************************************
1642 * POLICY INTERFACE *
1643 *********************************************************************/
1644
1645/**
1646 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1647 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1648 * is written
1da177e4
LT
1649 *
1650 * Reads the current cpufreq policy.
1651 */
1652int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1653{
1654 struct cpufreq_policy *cpu_policy;
1655 if (!policy)
1656 return -EINVAL;
1657
1658 cpu_policy = cpufreq_cpu_get(cpu);
1659 if (!cpu_policy)
1660 return -EINVAL;
1661
1da177e4 1662 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1663
1664 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1665 return 0;
1666}
1667EXPORT_SYMBOL(cpufreq_get_policy);
1668
1669
153d7f3f 1670/*
e08f5f5b
GS
1671 * data : current policy.
1672 * policy : policy to be set.
153d7f3f 1673 */
e08f5f5b
GS
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	/* Reject ranges entirely outside the current policy's limits. */
	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		/* setpolicy drivers apply the policy themselves. */
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			/* The rwsem is dropped around GOV_POLICY_EXIT because
			 * the governor may take it itself on this path. */
			if (data->governor) {
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
				unlock_policy_rwsem_write(policy->cpu);
				__cpufreq_governor(data,
						CPUFREQ_GOV_POLICY_EXIT);
				lock_policy_rwsem_write(policy->cpu);
			}

			/* start new governor */
			data->governor = policy->governor;
			if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					/* START failed: undo the INIT, again
					 * without holding the rwsem. */
					unlock_policy_rwsem_write(policy->cpu);
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_EXIT);
					lock_policy_rwsem_write(policy->cpu);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(data,
							CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
1775
1da177e4
LT
1776/**
1777 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1778 * @cpu: CPU which shall be re-evaluated
1779 *
25985edc 1780 * Useful for policy notifiers which have different necessities
1da177e4
LT
1781 * at different times.
1782 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	/* Rebuild the requested policy from the user-set limits. */
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	   -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			/* Cached and real frequency disagree: resync. */
			if (data->cur != policy.cur && cpufreq_driver->target)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
1830
dd184a01 1831static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1832 unsigned long action, void *hcpu)
1833{
1834 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1835 struct device *dev;
c32b6b8e 1836
8a25a2fd
KS
1837 dev = get_cpu_device(cpu);
1838 if (dev) {
c32b6b8e
AR
1839 switch (action) {
1840 case CPU_ONLINE:
8a25a2fd 1841 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1842 break;
1843 case CPU_DOWN_PREPARE:
a66b2e50 1844 case CPU_UP_CANCELED_FROZEN:
8a25a2fd 1845 __cpufreq_remove_dev(dev, NULL);
c32b6b8e 1846 break;
5a01f2e8 1847 case CPU_DOWN_FAILED:
8a25a2fd 1848 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1849 break;
1850 }
1851 }
1852 return NOTIFY_OK;
1853}
1854
/* Hotplug notifier block; registered in cpufreq_register_driver(). */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
1858
1859/*********************************************************************
1860 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1861 *********************************************************************/
1862
1863/**
1864 * cpufreq_register_driver - register a CPU Frequency driver
1865 * @driver_data: A struct cpufreq_driver containing the values#
1866 * submitted by the CPU Frequency driver.
1867 *
32ee8c3e 1868 * Registers a CPU Frequency driver to this core code. This code
1da177e4 1869 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 1870 * (and isn't unregistered in the meantime).
1da177e4
LT
1871 *
1872 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* A driver must be able to verify limits and either set a policy
	 * directly or handle frequency targets. */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* setpolicy drivers control frequency themselves, so loop timing
	 * constants stay valid across changes. */
	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* Only one driver can be registered at a time. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* This triggers cpufreq_add_dev() for every online CPU. */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	/* Roll back the registration so another driver may try. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1934
1935
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered; must be the one currently
 * registered, otherwise -EINVAL is returned.
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	/* Reject a NULL or mismatched driver pointer. */
	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Tear down in reverse order of registration. */
	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	/* Release the global driver slot under cpufreq_driver_lock. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
1963
/*
 * cpufreq core boot-time initialisation: set up per-CPU bookkeeping,
 * create the global "cpufreq" sysfs kobject and register syscore ops.
 * Runs at core_initcall time, before any driver can register.
 */
static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Mark every possible CPU as having no managing policy CPU yet. */
	for_each_possible_cpu(cpu) {
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	/*
	 * The global kobject anchors /sys/devices/system/cpu/cpufreq;
	 * failure here means sysfs is unusable, hence BUG_ON.
	 */
	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);
This page took 1.08177 seconds and 5 git commands to generate.