cpufreq: Create a macro for unlock_policy_rwsem{read,write}
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
1da177e4
LT
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/notifier.h>
24#include <linux/cpufreq.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/device.h>
29#include <linux/slab.h>
30#include <linux/cpu.h>
31#include <linux/completion.h>
3fc54d37 32#include <linux/mutex.h>
e00e56df 33#include <linux/syscore_ops.h>
1da177e4 34
6f4f2723
TR
35#include <trace/events/power.h>
36
1da177e4 37/**
cd878479 38 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
39 * level driver of CPUFreq support, and its spinlock. This lock
40 * also protects the cpufreq_cpu_data array.
41 */
7d5e350f 42static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 43static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
44#ifdef CONFIG_HOTPLUG_CPU
45/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 47#endif
1da177e4
LT
48static DEFINE_SPINLOCK(cpufreq_driver_lock);
49
5a01f2e8
VP
50/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
62 * - All holders of the lock should check to make sure that the CPU they
63 * are concerned with are online after they get the lock.
64 * - Governor routines that can be called in cpufreq hotplug path should not
65 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
66 * - Lock should not be held across
67 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 68 */
/*
 * Per-CPU map from a CPU to the CPU that owns its policy (set to the
 * policy->cpu when a policy covers several CPUs), and the per-policy
 * reader-writer semaphore itself.  See the locking rules above.
 */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

/*
 * Generates lock_policy_rwsem_read() / lock_policy_rwsem_write():
 * resolve @cpu to its policy-owning CPU and take that CPU's rwsem in
 * the requested mode.  BUG_ON catches a CPU whose policy mapping was
 * never initialized (-1).  Always returns 0 on success.
 */
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);
5a01f2e8 84
fa1d8af4
VK
/*
 * Generates unlock_policy_rwsem_read() / unlock_policy_rwsem_write():
 * the mirror of lock_policy_rwsem() above — resolve @cpu to its
 * policy-owning CPU and release that CPU's rwsem in the given mode.
 */
#define unlock_policy_rwsem(mode, cpu)					\
static void unlock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
5a01f2e8 95
1da177e4 96/* internal prototypes */
29464f28
DJ
97static int __cpufreq_governor(struct cpufreq_policy *policy,
98 unsigned int event);
5a01f2e8 99static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 100static void handle_update(struct work_struct *work);
1da177e4
LT
101
102/**
32ee8c3e
DJ
103 * Two notifier lists: the "policy" list is involved in the
104 * validation process for a new CPU frequency policy; the
1da177e4
LT
105 * "transition" list for kernel code that needs to handle
106 * changes to devices when the CPU clock speed changes.
107 * The mutex locks both lists.
108 */
e041c683 109static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 110static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 111
/* Set once the SRCU notifier head below has been initialized. */
static bool init_cpufreq_transition_notifier_list_called;

/*
 * Initialize the transition notifier head very early (pure_initcall)
 * so registration can never race with an uninitialized head.
 */
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 120
/* Non-zero once disable_cpufreq() has been called; checked by entry points. */
static int off __read_mostly;

/* Return non-zero if cpufreq has been globally disabled. */
static int cpufreq_disabled(void)
{
	return off;
}

/* Globally disable cpufreq; there is no way to re-enable it. */
void disable_cpufreq(void)
{
	off = 1;
}
1da177e4 130static LIST_HEAD(cpufreq_governor_list);
29464f28 131static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 132
/*
 * Look up the policy for @cpu and pin it: takes a reference on the
 * driver module, and — unless @sysfs is true — a kobject reference on
 * the policy.  Returns the policy or NULL.  Callers pair this with
 * __cpufreq_cpu_put() using the same @sysfs value.
 *
 * The driver lock is held across the lookup so the driver cannot be
 * unregistered between the module_get and reading cpufreq_cpu_data.
 */
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	/* sysfs callers already hold a kobject ref via the attribute file */
	if (!sysfs && !kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
a9144436
SB
170
171struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
172{
d5aaffa9
DB
173 if (cpufreq_disabled())
174 return NULL;
175
a9144436
SB
176 return __cpufreq_cpu_get(cpu, false);
177}
1da177e4
LT
178EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
179
a9144436
SB
/*
 * Variant of cpufreq_cpu_get() for sysfs show/store paths: pins the
 * driver module only, not the policy kobject (the open attribute file
 * already holds one).  Pair with cpufreq_cpu_put_sysfs().
 */
static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}
184
/*
 * Drop the references taken by __cpufreq_cpu_get(): the policy kobject
 * ref (only if @sysfs is false, matching the get side) and then the
 * driver module ref.
 */
static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
	if (!sysfs)
		kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
7d5e350f 191
1da177e4
LT
192void cpufreq_cpu_put(struct cpufreq_policy *data)
193{
d5aaffa9
DB
194 if (cpufreq_disabled())
195 return;
196
a9144436 197 __cpufreq_cpu_put(data, false);
1da177e4
LT
198}
199EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
200
a9144436
SB
/* Release a policy pinned with cpufreq_cpu_get_sysfs() (module ref only). */
static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, true);
}
1da177e4 205
1da177e4
LT
206/*********************************************************************
207 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
208 *********************************************************************/
209
210/**
211 * adjust_jiffies - adjust the system "loops_per_jiffy"
212 *
213 * This function alters the system "loops_per_jiffy" for the clock
214 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 215 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
216 * per-CPU loops_per_jiffy value wherever possible.
217 */
#ifndef CONFIG_SMP
/* Reference loops_per_jiffy and the frequency it was sampled at. */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

/*
 * Rescale the UP kernel's global loops_per_jiffy for a frequency
 * change.  The first call latches the current value/frequency as the
 * reference; subsequent post-change (or suspend/resume) notifications
 * scale from that reference to the new frequency.  Skipped entirely
 * when the driver declares CPUFREQ_CONST_LOOPS.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: per-CPU loops_per_jiffy is used instead; nothing to adjust here. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
247
248
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects: once with CPUFREQ_PRECHANGE before the hardware is
 * reprogrammed and once with CPUFREQ_POSTCHANGE after.  Must be called
 * with interrupts enabled (notifiers may sleep).
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;

	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				/* trust the core's view over the driver's */
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* record the new frequency as the policy's current one */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
306
307
308
309/*********************************************************************
310 * SYSFS INTERFACE *
311 *********************************************************************/
312
3bcb09a3
JF
313static struct cpufreq_governor *__find_governor(const char *str_governor)
314{
315 struct cpufreq_governor *t;
316
317 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 318 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
319 return t;
320
321 return NULL;
322}
323
1da177e4
LT
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers the string selects a policy constant via
 * @policy; for target drivers it selects a registered governor via
 * @governor, auto-loading the "cpufreq_<name>" module if needed.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/*
			 * Drop the mutex around request_module(): module
			 * init will re-take it to register the governor.
			 */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4
LT
372
373
1da177e4 374/**
e08f5f5b
GS
375 * cpufreq_per_cpu_attr_read() / show_##file_name() -
376 * print out cpufreq information
1da177e4
LT
377 *
378 * Write out information from cpufreq_driver->policy[cpu]; object must be
379 * "unsigned int".
380 */
381
32ee8c3e
DJ
/*
 * Generates a sysfs show_<file_name>() helper that prints one unsigned
 * int member (@object) of the policy, e.g. show_scaling_max_freq()
 * prints policy->max.
 */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
395
e08f5f5b
GS
396static int __cpufreq_set_policy(struct cpufreq_policy *data,
397 struct cpufreq_policy *policy);
7970e08b 398
1da177e4
LT
399/**
400 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
401 */
/*
 * Generates a sysfs store_<file_name>() helper: parse an unsigned int
 * from @buf into a copy of the current policy, apply it with
 * __cpufreq_set_policy(), and mirror the resulting value into
 * user_policy so later limit updates preserve the user's choice.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret;						\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
1da177e4
LT
425
426/**
427 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
428 */
905d77cd
DJ
429static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
430 char *buf)
1da177e4 431{
5a01f2e8 432 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
433 if (!cur_freq)
434 return sprintf(buf, "<unknown>");
435 return sprintf(buf, "%u\n", cur_freq);
436}
437
438
439/**
440 * show_scaling_governor - show the current policy for the specified CPU
441 */
905d77cd 442static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 443{
29464f28 444 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
445 return sprintf(buf, "powersave\n");
446 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
447 return sprintf(buf, "performance\n");
448 else if (policy->governor)
4b972f0b 449 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 450 policy->governor->name);
1da177e4
LT
451 return -EINVAL;
452}
453
454
/**
 * store_scaling_governor - store policy for the specified CPU
 *
 * Parses a governor (or policy) name from @buf and applies it via
 * __cpufreq_set_policy(); the chosen policy/governor is then recorded
 * in user_policy.  Returns @count on success, a negative errno on
 * failure.
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret;
	char	str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
489
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
497
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 *
 * setpolicy drivers only offer the two built-in policies; target
 * drivers list every registered governor, stopping early if another
 * name might not fit in the remaining page.
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		/* leave room for one more name, a space and the newline */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 522
835481d9 523static ssize_t show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
524{
525 ssize_t i = 0;
526 unsigned int cpu;
527
835481d9 528 for_each_cpu(cpu, mask) {
1da177e4
LT
529 if (i)
530 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
531 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
532 if (i >= (PAGE_SIZE - 5))
29464f28 533 break;
1da177e4
LT
534 }
535 i += sprintf(&buf[i], "\n");
536 return i;
537}
538
e8628dd0
DW
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->related_cpus, buf);
}
547
/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}
555
9e76988e 556static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 557 const char *buf, size_t count)
9e76988e
VP
558{
559 unsigned int freq = 0;
560 unsigned int ret;
561
879000f9 562 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
563 return -EINVAL;
564
565 ret = sscanf(buf, "%u", &freq);
566 if (ret != 1)
567 return -EINVAL;
568
569 policy->governor->store_setspeed(policy, freq);
570
571 return count;
572}
573
574static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
575{
879000f9 576 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
577 return sprintf(buf, "<unsupported>\n");
578
579 return policy->governor->show_setspeed(policy, buf);
580}
1da177e4 581
e2f74f35 582/**
8bf1ac72 583 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
584 */
585static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
586{
587 unsigned int limit;
588 int ret;
589 if (cpufreq_driver->bios_limit) {
590 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
591 if (!ret)
592 return sprintf(buf, "%u\n", limit);
593 }
594 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
595}
596
6dad2a29
BP
/* Instantiate the freq_attr objects backing the show_*/store_* helpers
 * above; cpuinfo_cur_freq is root-readable only (0400) since reading it
 * may touch hardware. */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 611
/* Attributes created for every policy kobject; driver-specific and
 * conditional attributes (cpuinfo_cur_freq, scaling_cur_freq,
 * bios_limit) are added separately in cpufreq_add_dev_interface(). */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
626
8aa84ad8
TR
/* Global /sys/devices/system/cpu/cpufreq kobject, shared with governors. */
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);
629
29464f28
DJ
/* Map a policy kobject / freq attribute back to its containing struct. */
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 632
/*
 * Generic sysfs show dispatcher for policy attributes: pin the policy,
 * take its rwsem for reading, and forward to the attribute's show hook.
 */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	/* re-pin through the sysfs variant (no extra kobject ref) */
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
656
905d77cd
DJ
/*
 * Generic sysfs store dispatcher for policy attributes: pin the policy,
 * take its rwsem for writing, and forward to the attribute's store hook.
 */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	/* re-pin through the sysfs variant (no extra kobject ref) */
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
681
/*
 * kobject release callback: wake whoever is waiting in
 * wait_for_completion(&policy->kobj_unregister) so the policy can be
 * freed once the last sysfs reference is gone.
 */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
688
/* sysfs ops and kobject type tying policy kobjects to the show()/store()
 * dispatchers and the release-completion above. */
static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
699
/* symlink affected CPUs */
/*
 * For every CPU in policy->cpus other than @cpu, create a "cpufreq"
 * sysfs symlink from that CPU's device to the shared policy kobject,
 * taking a policy reference per link.
 *
 * NOTE(review): the cpufreq_cpu_get()/get_cpu_device() results are not
 * NULL-checked before use, and on a mid-loop sysfs_create_link()
 * failure the links and policy refs from earlier iterations are not
 * rolled back — presumably tolerated by the single caller's error
 * path; verify before reusing elsewhere.
 */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		if (j == cpu)
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
726
cf3289d0
AC
/*
 * Create the sysfs interface for a freshly initialized policy: set up
 * the policy kobject under @dev, create driver-specific and
 * conditional attribute files, publish the policy in the per-CPU
 * tables, create sibling symlinks and finally apply the default
 * policy.  On attribute/symlink failure the kobject is dropped and we
 * wait for its release before returning.
 */
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	/* publish the policy for every CPU it manages */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
799
fcf80582
VK
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Attach hot-added @cpu to the existing policy already managing its
 * @sibling: stop the governor, add the CPU to the policy's mask and
 * per-CPU tables under the driver lock, restart the governor, and
 * create the CPU's "cpufreq" symlink.  Keeps the policy reference
 * taken here on success (dropped only on symlink failure).
 */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;

	lock_policy_rwsem_write(cpu);

	/* governor must be stopped while the cpus mask changes */
	__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	__cpufreq_governor(policy, CPUFREQ_GOV_START);
	__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

	unlock_policy_rwsem_write(cpu);

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif
1da177e4
LT
836
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * Fast paths: returns 0 immediately for offline CPUs, CPUs already
 * covered by a policy, and (hotplug) CPUs that can simply rejoin an
 * online sibling's policy.  Otherwise allocates and initializes a new
 * policy, calls the driver's ->init(), restores any previously saved
 * governor, and builds the sysfs interface.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;
	int sibling;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cpumask_test_cpu(cpu, cp->related_cpus))
			return cpufreq_add_policy_cpu(cpu, sibling, dev);
	}
#endif
#endif

	/* pin the driver module for the duration of registration */
	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;
	ret = (lock_policy_rwsem_write(cpu) < 0);
	WARN_ON(ret);

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_unlock_policy;
	}

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	/* restore the governor this CPU had before it was unplugged */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	ret = cpufreq_add_dev_interface(cpu, policy, dev);
	if (ret)
		goto err_out_unregister;

	unlock_policy_rwsem_write(cpu);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	/* unpublish, then drop the kobject and wait for its release */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_unlock_policy:
	unlock_policy_rwsem_write(cpu);
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	return ret;
}
974
b8eed8af
VK
/*
 * update_policy_cpu - hand ownership of a shared policy to another CPU
 * @policy: policy whose owner CPU is being changed
 * @cpu: the new owner (must be a member of policy->cpus)
 *
 * Records the previous owner in policy->last_cpu, repoints the per-cpu
 * owner map for every sibling, and then tells interested parties (freq
 * table code, policy notifiers) that the owner changed.  Caller is
 * expected to hold the policy locks; this function takes none itself.
 */
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int j;

	/* remember the old owner so notifier users can migrate state */
	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	/* every CPU sharing this policy now resolves to the new owner */
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
	/* keep the frequency-table sysfs bookkeeping in sync */
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	/* notify only after the per-cpu map is consistent */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
1da177e4
LT
991
992/**
5a01f2e8 993 * __cpufreq_remove_dev - remove a CPU device
1da177e4
LT
994 *
995 * Removes the cpufreq interface for a CPU device.
5a01f2e8
VP
996 * Caller should already have policy_rwsem in write mode for this CPU.
997 * This routine frees the rwsem before returning.
1da177e4 998 */
8a25a2fd 999static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 1000{
b8eed8af 1001 unsigned int cpu = dev->id, ret, cpus;
1da177e4
LT
1002 unsigned long flags;
1003 struct cpufreq_policy *data;
499bca9b
AW
1004 struct kobject *kobj;
1005 struct completion *cmp;
8a25a2fd 1006 struct device *cpu_dev;
1da177e4 1007
b8eed8af 1008 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1da177e4
LT
1009
1010 spin_lock_irqsave(&cpufreq_driver_lock, flags);
7a6aedfa 1011 data = per_cpu(cpufreq_cpu_data, cpu);
1da177e4
LT
1012
1013 if (!data) {
b8eed8af 1014 pr_debug("%s: No cpu_data found\n", __func__);
1da177e4 1015 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
5a01f2e8 1016 unlock_policy_rwsem_write(cpu);
1da177e4
LT
1017 return -EINVAL;
1018 }
1da177e4 1019
b8eed8af 1020 if (cpufreq_driver->target)
f6a7409c 1021 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
084f3493
TR
1022
1023#ifdef CONFIG_HOTPLUG_CPU
e77b89f1
DM
1024 strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
1025 CPUFREQ_NAME_LEN);
084f3493
TR
1026#endif
1027
b8eed8af
VK
1028 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1029 cpus = cpumask_weight(data->cpus);
1030 cpumask_clear_cpu(cpu, data->cpus);
1da177e4 1031
73bf0fc2
VK
1032 if (cpu != data->cpu) {
1033 sysfs_remove_link(&dev->kobj, "cpufreq");
1034 } else if (cpus > 1) {
b8eed8af
VK
1035 /* first sibling now owns the new sysfs dir */
1036 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1037 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1038 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1039 if (ret) {
1040 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1041 cpumask_set_cpu(cpu, data->cpus);
1042 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1043 "cpufreq");
1044 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
499bca9b 1045 unlock_policy_rwsem_write(cpu);
b8eed8af 1046 return -EINVAL;
1da177e4 1047 }
b8eed8af
VK
1048
1049 update_policy_cpu(data, cpu_dev->id);
1050 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1051 __func__, cpu_dev->id, cpu);
1da177e4 1052 }
1da177e4 1053
b8eed8af 1054 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
5a01f2e8 1055
b8eed8af
VK
1056 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1057 cpufreq_cpu_put(data);
499bca9b 1058 unlock_policy_rwsem_write(cpu);
1da177e4 1059
b8eed8af
VK
1060 /* If cpu is last user of policy, free policy */
1061 if (cpus == 1) {
1062 lock_policy_rwsem_write(cpu);
1063 kobj = &data->kobj;
1064 cmp = &data->kobj_unregister;
1065 unlock_policy_rwsem_write(cpu);
1066 kobject_put(kobj);
7d26e2d5 1067
b8eed8af
VK
1068 /* we need to make sure that the underlying kobj is actually
1069 * not referenced anymore by anybody before we proceed with
1070 * unloading.
1071 */
1072 pr_debug("waiting for dropping of refcount\n");
1073 wait_for_completion(cmp);
1074 pr_debug("wait complete\n");
27ecddc2 1075
27ecddc2 1076 lock_policy_rwsem_write(cpu);
b8eed8af
VK
1077 if (cpufreq_driver->exit)
1078 cpufreq_driver->exit(data);
1079 unlock_policy_rwsem_write(cpu);
27ecddc2 1080
b8eed8af
VK
1081 free_cpumask_var(data->related_cpus);
1082 free_cpumask_var(data->cpus);
1083 kfree(data);
1084 } else if (cpufreq_driver->target) {
1085 __cpufreq_governor(data, CPUFREQ_GOV_START);
1086 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1087 }
1da177e4 1088
1da177e4
LT
1089 return 0;
1090}
1091
1092
8a25a2fd 1093static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1094{
8a25a2fd 1095 unsigned int cpu = dev->id;
5a01f2e8 1096 int retval;
ec28297a
VP
1097
1098 if (cpu_is_offline(cpu))
1099 return 0;
1100
5a01f2e8
VP
1101 if (unlikely(lock_policy_rwsem_write(cpu)))
1102 BUG();
1103
8a25a2fd 1104 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1105 return retval;
1106}
1107
1108
65f27f38 1109static void handle_update(struct work_struct *work)
1da177e4 1110{
65f27f38
DH
1111 struct cpufreq_policy *policy =
1112 container_of(work, struct cpufreq_policy, update);
1113 unsigned int cpu = policy->cpu;
2d06d8c4 1114 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1115 cpufreq_update_policy(cpu);
1116}
1117
1118/**
1119 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1120 * @cpu: cpu number
1121 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1122 * @new_freq: CPU frequency the CPU actually runs at
1123 *
29464f28
DJ
1124 * We adjust to current frequency first, and need to clean up later.
1125 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1126 */
e08f5f5b
GS
1127static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1128 unsigned int new_freq)
1da177e4
LT
1129{
1130 struct cpufreq_freqs freqs;
1131
2d06d8c4 1132 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1133 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1134
1135 freqs.cpu = cpu;
1136 freqs.old = old_freq;
1137 freqs.new = new_freq;
1138 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1139 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1140}
1141
1142
32ee8c3e 1143/**
4ab70df4 1144 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1145 * @cpu: CPU number
1146 *
1147 * This is the last known freq, without actually getting it from the driver.
1148 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1149 */
1150unsigned int cpufreq_quick_get(unsigned int cpu)
1151{
1152 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
e08f5f5b 1153 unsigned int ret_freq = 0;
95235ca2
VP
1154
1155 if (policy) {
e08f5f5b 1156 ret_freq = policy->cur;
95235ca2
VP
1157 cpufreq_cpu_put(policy);
1158 }
1159
4d34a67d 1160 return ret_freq;
95235ca2
VP
1161}
1162EXPORT_SYMBOL(cpufreq_quick_get);
1163
3d737108
JB
1164/**
1165 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1166 * @cpu: CPU number
1167 *
1168 * Just return the max possible frequency for a given CPU.
1169 */
1170unsigned int cpufreq_quick_get_max(unsigned int cpu)
1171{
1172 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1173 unsigned int ret_freq = 0;
1174
1175 if (policy) {
1176 ret_freq = policy->max;
1177 cpufreq_cpu_put(policy);
1178 }
1179
1180 return ret_freq;
1181}
1182EXPORT_SYMBOL(cpufreq_quick_get_max);
1183
95235ca2 1184
5a01f2e8 1185static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1186{
7a6aedfa 1187 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1188 unsigned int ret_freq = 0;
1da177e4 1189
1da177e4 1190 if (!cpufreq_driver->get)
4d34a67d 1191 return ret_freq;
1da177e4 1192
e08f5f5b 1193 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1194
e08f5f5b
GS
1195 if (ret_freq && policy->cur &&
1196 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1197 /* verify no discrepancy between actual and
1198 saved value exists */
1199 if (unlikely(ret_freq != policy->cur)) {
1200 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1201 schedule_work(&policy->update);
1202 }
1203 }
1204
4d34a67d 1205 return ret_freq;
5a01f2e8 1206}
1da177e4 1207
5a01f2e8
VP
1208/**
1209 * cpufreq_get - get the current CPU frequency (in kHz)
1210 * @cpu: CPU number
1211 *
1212 * Get the CPU current (static) CPU frequency
1213 */
/*
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Takes a policy reference and the read side of the policy rwsem, then
 * asks the driver via __cpufreq_get().  Returns 0 when no policy exists
 * or the lock cannot be taken.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq = 0;

	if (!policy)
		return 0;

	if (likely(!lock_policy_rwsem_read(cpu))) {
		freq = __cpufreq_get(cpu);
		unlock_policy_rwsem_read(cpu);
	}

	cpufreq_cpu_put(policy);

	return freq;
}
EXPORT_SYMBOL(cpufreq_get);
1235
8a25a2fd
KS
/* Hooks cpufreq into the cpu subsystem: add_dev/remove_dev run whenever
 * a CPU device appears or disappears under /sys/devices/system/cpu. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1242
1da177e4 1243
42d4dc3f 1244/**
e00e56df
RW
1245 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1246 *
1247 * This function is only executed for the boot processor. The other CPUs
1248 * have been put offline by means of CPU hotplug.
42d4dc3f 1249 */
e00e56df 1250static int cpufreq_bp_suspend(void)
42d4dc3f 1251{
e08f5f5b 1252 int ret = 0;
4bc5d341 1253
e00e56df 1254 int cpu = smp_processor_id();
42d4dc3f
BH
1255 struct cpufreq_policy *cpu_policy;
1256
2d06d8c4 1257 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1258
e00e56df 1259 /* If there's no policy for the boot CPU, we have nothing to do. */
42d4dc3f
BH
1260 cpu_policy = cpufreq_cpu_get(cpu);
1261 if (!cpu_policy)
e00e56df 1262 return 0;
42d4dc3f
BH
1263
1264 if (cpufreq_driver->suspend) {
7ca64e2d 1265 ret = cpufreq_driver->suspend(cpu_policy);
ce6c3997 1266 if (ret)
42d4dc3f
BH
1267 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1268 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1269 }
1270
42d4dc3f 1271 cpufreq_cpu_put(cpu_policy);
c9060494 1272 return ret;
42d4dc3f
BH
1273}
1274
1da177e4 1275/**
e00e56df 1276 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1277 *
1278 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1279 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1280 * restored. It will verify that the current freq is in sync with
1281 * what we believe it to be. This is a bit later than when it
1282 * should be, but nonethteless it's better than calling
1283 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1284 *
1285 * This function is only executed for the boot CPU. The other CPUs have not
1286 * been turned on yet.
1da177e4 1287 */
e00e56df 1288static void cpufreq_bp_resume(void)
1da177e4 1289{
e08f5f5b 1290 int ret = 0;
4bc5d341 1291
e00e56df 1292 int cpu = smp_processor_id();
1da177e4
LT
1293 struct cpufreq_policy *cpu_policy;
1294
2d06d8c4 1295 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1296
e00e56df 1297 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1298 cpu_policy = cpufreq_cpu_get(cpu);
1299 if (!cpu_policy)
e00e56df 1300 return;
1da177e4
LT
1301
1302 if (cpufreq_driver->resume) {
1303 ret = cpufreq_driver->resume(cpu_policy);
1304 if (ret) {
1305 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1306 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1307 goto fail;
1da177e4
LT
1308 }
1309 }
1310
1da177e4 1311 schedule_work(&cpu_policy->update);
ce6c3997 1312
c9060494 1313fail:
1da177e4 1314 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1315}
1316
e00e56df
RW
/* Boot-CPU suspend/resume hooks; registered from cpufreq_core_init(). */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1321
9d95046e
BP
1322/**
1323 * cpufreq_get_current_driver - return current driver's name
1324 *
1325 * Return the name string of the currently loaded cpufreq driver
1326 * or NULL, if none.
1327 */
1328const char *cpufreq_get_current_driver(void)
1329{
1330 if (cpufreq_driver)
1331 return cpufreq_driver->name;
1332
1333 return NULL;
1334}
1335EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1336
1337/*********************************************************************
1338 * NOTIFIER LISTS INTERFACE *
1339 *********************************************************************/
1340
1341/**
1342 * cpufreq_register_notifier - register a driver with cpufreq
1343 * @nb: notifier function to register
1344 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1345 *
32ee8c3e 1346 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1347 * are notified about clock rate changes (once before and once after
1348 * the transition), or a list of drivers that are notified about
1349 * changes in cpufreq policy.
1350 *
1351 * This function may sleep, and has the same return conditions as
e041c683 1352 * blocking_notifier_chain_register.
1da177e4
LT
1353 */
1354int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1355{
1356 int ret;
1357
d5aaffa9
DB
1358 if (cpufreq_disabled())
1359 return -EINVAL;
1360
74212ca4
CEB
1361 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1362
1da177e4
LT
1363 switch (list) {
1364 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1365 ret = srcu_notifier_chain_register(
e041c683 1366 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1367 break;
1368 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1369 ret = blocking_notifier_chain_register(
1370 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1371 break;
1372 default:
1373 ret = -EINVAL;
1374 }
1da177e4
LT
1375
1376 return ret;
1377}
1378EXPORT_SYMBOL(cpufreq_register_notifier);
1379
1380
1381/**
1382 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1383 * @nb: notifier block to be unregistered
1384 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1385 *
1386 * Remove a driver from the CPU frequency notifier list.
1387 *
1388 * This function may sleep, and has the same return conditions as
e041c683 1389 * blocking_notifier_chain_unregister.
1da177e4
LT
1390 */
1391int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1392{
1393 int ret;
1394
d5aaffa9
DB
1395 if (cpufreq_disabled())
1396 return -EINVAL;
1397
1da177e4
LT
1398 switch (list) {
1399 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1400 ret = srcu_notifier_chain_unregister(
e041c683 1401 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1402 break;
1403 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1404 ret = blocking_notifier_chain_unregister(
1405 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1406 break;
1407 default:
1408 ret = -EINVAL;
1409 }
1da177e4
LT
1410
1411 return ret;
1412}
1413EXPORT_SYMBOL(cpufreq_unregister_notifier);
1414
1415
1416/*********************************************************************
1417 * GOVERNORS *
1418 *********************************************************************/
1419
1420
1421int __cpufreq_driver_target(struct cpufreq_policy *policy,
1422 unsigned int target_freq,
1423 unsigned int relation)
1424{
1425 int retval = -EINVAL;
7249924e 1426 unsigned int old_target_freq = target_freq;
c32b6b8e 1427
a7b422cd
KRW
1428 if (cpufreq_disabled())
1429 return -ENODEV;
1430
7249924e
VK
1431 /* Make sure that target_freq is within supported range */
1432 if (target_freq > policy->max)
1433 target_freq = policy->max;
1434 if (target_freq < policy->min)
1435 target_freq = policy->min;
1436
1437 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1438 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1439
1440 if (target_freq == policy->cur)
1441 return 0;
1442
3361b7b1 1443 if (cpufreq_driver->target)
1da177e4 1444 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1445
1da177e4
LT
1446 return retval;
1447}
1448EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1449
1da177e4
LT
1450int cpufreq_driver_target(struct cpufreq_policy *policy,
1451 unsigned int target_freq,
1452 unsigned int relation)
1453{
f1829e4a 1454 int ret = -EINVAL;
1da177e4
LT
1455
1456 policy = cpufreq_cpu_get(policy->cpu);
1457 if (!policy)
f1829e4a 1458 goto no_policy;
1da177e4 1459
5a01f2e8 1460 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1461 goto fail;
1da177e4
LT
1462
1463 ret = __cpufreq_driver_target(policy, target_freq, relation);
1464
5a01f2e8 1465 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1466
f1829e4a 1467fail:
1da177e4 1468 cpufreq_cpu_put(policy);
f1829e4a 1469no_policy:
1da177e4
LT
1470 return ret;
1471}
1472EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1473
bf0b90e3 1474int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62
VP
1475{
1476 int ret = 0;
1477
d5aaffa9
DB
1478 if (cpufreq_disabled())
1479 return ret;
1480
3361b7b1 1481 if (!cpufreq_driver->getavg)
0676f7f2
VK
1482 return 0;
1483
dfde5d62
VP
1484 policy = cpufreq_cpu_get(policy->cpu);
1485 if (!policy)
1486 return -EINVAL;
1487
0676f7f2 1488 ret = cpufreq_driver->getavg(policy, cpu);
dfde5d62 1489
dfde5d62
VP
1490 cpufreq_cpu_put(policy);
1491 return ret;
1492}
5a01f2e8 1493EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1494
153d7f3f 1495/*
153d7f3f
AV
1496 * when "event" is CPUFREQ_GOV_LIMITS
1497 */
1da177e4 1498
e08f5f5b
GS
/*
 * __cpufreq_governor - deliver @event (GOV_START/STOP/LIMITS) to the
 * policy's governor, with a fallback to the performance governor when
 * the chosen governor cannot cope with the hardware's transition
 * latency, and careful module-refcount balancing: one reference is
 * kept per started governor instance and dropped again on STOP.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	*/
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* governor too slow for this hardware? fall back if we can */
	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	/* pin the governor module while its callback runs */
	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* NOTE(review): the counter is adjusted even when the governor
	 * callback returned an error -- confirm this is intended. */
	if (event == CPUFREQ_GOV_START)
		policy->governor->initialized++;
	else if (event == CPUFREQ_GOV_STOP)
		policy->governor->initialized--;

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
1550
1551
1da177e4
LT
1552int cpufreq_register_governor(struct cpufreq_governor *governor)
1553{
3bcb09a3 1554 int err;
1da177e4
LT
1555
1556 if (!governor)
1557 return -EINVAL;
1558
a7b422cd
KRW
1559 if (cpufreq_disabled())
1560 return -ENODEV;
1561
3fc54d37 1562 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1563
b394058f 1564 governor->initialized = 0;
3bcb09a3
JF
1565 err = -EBUSY;
1566 if (__find_governor(governor->name) == NULL) {
1567 err = 0;
1568 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1569 }
1da177e4 1570
32ee8c3e 1571 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1572 return err;
1da177e4
LT
1573}
1574EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1575
1576
1577void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1578{
90e41bac
PB
1579#ifdef CONFIG_HOTPLUG_CPU
1580 int cpu;
1581#endif
1582
1da177e4
LT
1583 if (!governor)
1584 return;
1585
a7b422cd
KRW
1586 if (cpufreq_disabled())
1587 return;
1588
90e41bac
PB
1589#ifdef CONFIG_HOTPLUG_CPU
1590 for_each_present_cpu(cpu) {
1591 if (cpu_online(cpu))
1592 continue;
1593 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1594 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1595 }
1596#endif
1597
3fc54d37 1598 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1599 list_del(&governor->governor_list);
3fc54d37 1600 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1601 return;
1602}
1603EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1604
1605
1606
1607/*********************************************************************
1608 * POLICY INTERFACE *
1609 *********************************************************************/
1610
1611/**
1612 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1613 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1614 * is written
1da177e4
LT
1615 *
1616 * Reads the current cpufreq policy.
1617 */
1618int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1619{
1620 struct cpufreq_policy *cpu_policy;
1621 if (!policy)
1622 return -EINVAL;
1623
1624 cpu_policy = cpufreq_cpu_get(cpu);
1625 if (!cpu_policy)
1626 return -EINVAL;
1627
1da177e4 1628 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1629
1630 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1631 return 0;
1632}
1633EXPORT_SYMBOL(cpufreq_get_policy);
1634
1635
153d7f3f 1636/*
e08f5f5b
GS
1637 * data : current policy.
1638 * policy : policy to be set.
153d7f3f 1639 */
e08f5f5b
GS
/*
 * __cpufreq_set_policy - apply the new limits/governor in @policy to the
 * live policy @data.
 *
 * Sequence: sanity-check the ranges, let the driver verify, give
 * notifiers two chances to adjust (ADJUST, then INCOMPATIBLE), verify
 * again, announce the result (NOTIFY), then either hand the range to a
 * setpolicy-style driver or perform a governor stop/start with rollback
 * to the old governor on failure.  Caller holds the policy write lock.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	/* requested range must overlap the currently active one */
	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		/* driver manages its own frequency decisions */
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
1723
1da177e4
LT
1724/**
1725 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1726 * @cpu: CPU which shall be re-evaluated
1727 *
25985edc 1728 * Useful for policy notifiers which have different necessities
1da177e4
LT
1729 * at different times.
1730 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;	/* scratch copy fed to set_policy */
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	/* start from the live policy but re-apply the user's limits */
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
1778
dd184a01 1779static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1780 unsigned long action, void *hcpu)
1781{
1782 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1783 struct device *dev;
c32b6b8e 1784
8a25a2fd
KS
1785 dev = get_cpu_device(cpu);
1786 if (dev) {
c32b6b8e
AR
1787 switch (action) {
1788 case CPU_ONLINE:
8bb78442 1789 case CPU_ONLINE_FROZEN:
8a25a2fd 1790 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1791 break;
1792 case CPU_DOWN_PREPARE:
8bb78442 1793 case CPU_DOWN_PREPARE_FROZEN:
5a01f2e8
VP
1794 if (unlikely(lock_policy_rwsem_write(cpu)))
1795 BUG();
1796
8a25a2fd 1797 __cpufreq_remove_dev(dev, NULL);
c32b6b8e 1798 break;
5a01f2e8 1799 case CPU_DOWN_FAILED:
8bb78442 1800 case CPU_DOWN_FAILED_FROZEN:
8a25a2fd 1801 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1802 break;
1803 }
1804 }
1805 return NOTIFY_OK;
1806}
1807
/* Hotplug notifier block; registered from cpufreq_register_driver(). */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
1811
1812/*********************************************************************
1813 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1814 *********************************************************************/
1815
1816/**
1817 * cpufreq_register_driver - register a CPU Frequency driver
1818 * @driver_data: A struct cpufreq_driver containing the values#
1819 * submitted by the CPU Frequency driver.
1820 *
32ee8c3e 1821 * Registers a CPU Frequency driver to this core code. This code
1da177e4 1822 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 1823 * (and isn't unregistered in the meantime).
1da177e4
LT
1824 *
1825 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* a driver needs verify+init and exactly one of setpolicy/target */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* setpolicy drivers change freq behind our back: loops stay const */
	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* claim the single global driver slot under the lock */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* this triggers cpufreq_add_dev() (-> driver ->init) per CPU */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	/* release the driver slot again on any failure */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1887
1888
1889/**
1890 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1891 *
32ee8c3e 1892 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
1893 * the right to do so, i.e. if you have succeeded in initialising before!
1894 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1895 * currently not initialised.
1896 */
221dee28 1897int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
1898{
1899 unsigned long flags;
1900
2d06d8c4 1901 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 1902 return -EINVAL;
1da177e4 1903
2d06d8c4 1904 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 1905
8a25a2fd 1906 subsys_interface_unregister(&cpufreq_interface);
65edc68c 1907 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4
LT
1908
1909 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1910 cpufreq_driver = NULL;
1911 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1912
1913 return 0;
1914}
1915EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
1916
/*
 * cpufreq_core_init - core bring-up, run at core_initcall time (before
 * any cpufreq driver can register).  Initializes the per-CPU owner map
 * and rwsems, creates the global "cpufreq" kobject under the cpu
 * subsystem root, and registers the boot-CPU suspend/resume hooks.
 */
static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	for_each_possible_cpu(cpu) {
		/* -1 == no policy owns this CPU yet */
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	/* without the global kobject nothing else can work */
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);
This page took 0.707166 seconds and 5 git commands to generate.