cpufreq: Fix minor formatting issues
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
bb176f7d 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
1da177e4 7 *
c32b6b8e 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 9 * Added handling for CPU hotplug
8ff69732
DJ
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 12 *
1da177e4
LT
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
1da177e4
LT
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
72a4ce34 20#include <asm/cputime.h>
1da177e4 21#include <linux/kernel.h>
72a4ce34 22#include <linux/kernel_stat.h>
1da177e4
LT
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/notifier.h>
26#include <linux/cpufreq.h>
27#include <linux/delay.h>
28#include <linux/interrupt.h>
29#include <linux/spinlock.h>
72a4ce34 30#include <linux/tick.h>
1da177e4
LT
31#include <linux/device.h>
32#include <linux/slab.h>
33#include <linux/cpu.h>
34#include <linux/completion.h>
3fc54d37 35#include <linux/mutex.h>
e00e56df 36#include <linux/syscore_ops.h>
1da177e4 37
6f4f2723
TR
38#include <trace/events/power.h>
39
1da177e4 40/**
cd878479 41 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
42 * level driver of CPUFreq support, and its spinlock. This lock
43 * also protects the cpufreq_cpu_data array.
44 */
1c3d85dd 45static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 46static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
bb176f7d
VK
47static DEFINE_RWLOCK(cpufreq_driver_lock);
48static DEFINE_MUTEX(cpufreq_governor_lock);
49
084f3493
TR
50#ifdef CONFIG_HOTPLUG_CPU
51/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 52static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 53#endif
1da177e4 54
5a01f2e8
VP
55/*
56 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
57 * all cpufreq/hotplug/workqueue/etc related lock issues.
58 *
59 * The rules for this semaphore:
60 * - Any routine that wants to read from the policy structure will
61 * do a down_read on this semaphore.
62 * - Any routine that will write to the policy structure and/or may take away
63 * the policy altogether (eg. CPU hotplug), will hold this lock in write
64 * mode before doing so.
65 *
66 * Additional rules:
5a01f2e8
VP
67 * - Governor routines that can be called in cpufreq hotplug path should not
68 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
69 * - Lock should not be held across
70 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 71 */
f1625066 72static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
73static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
74
75#define lock_policy_rwsem(mode, cpu) \
fa1d8af4 76static int lock_policy_rwsem_##mode(int cpu) \
5a01f2e8 77{ \
f1625066 78 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
5a01f2e8
VP
79 BUG_ON(policy_cpu == -1); \
80 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8
VP
81 \
82 return 0; \
83}
84
85lock_policy_rwsem(read, cpu);
5a01f2e8 86lock_policy_rwsem(write, cpu);
5a01f2e8 87
fa1d8af4
VK
88#define unlock_policy_rwsem(mode, cpu) \
89static void unlock_policy_rwsem_##mode(int cpu) \
90{ \
91 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
92 BUG_ON(policy_cpu == -1); \
93 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8 94}
5a01f2e8 95
fa1d8af4
VK
96unlock_policy_rwsem(read, cpu);
97unlock_policy_rwsem(write, cpu);
5a01f2e8 98
1da177e4 99/* internal prototypes */
29464f28
DJ
100static int __cpufreq_governor(struct cpufreq_policy *policy,
101 unsigned int event);
5a01f2e8 102static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 103static void handle_update(struct work_struct *work);
1da177e4
LT
104
105/**
32ee8c3e
DJ
106 * Two notifier lists: the "policy" list is involved in the
107 * validation process for a new CPU frequency policy; the
1da177e4
LT
108 * "transition" list for kernel code that needs to handle
109 * changes to devices when the CPU clock speed changes.
110 * The mutex locks both lists.
111 */
e041c683 112static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 113static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 114
74212ca4 115static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
116static int __init init_cpufreq_transition_notifier_list(void)
117{
118 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 119 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
120 return 0;
121}
b3438f82 122pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 123
a7b422cd 124static int off __read_mostly;
da584455 125static int cpufreq_disabled(void)
a7b422cd
KRW
126{
127 return off;
128}
129void disable_cpufreq(void)
130{
131 off = 1;
132}
1da177e4 133static LIST_HEAD(cpufreq_governor_list);
29464f28 134static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 135
4d5dcc42
VK
136bool have_governor_per_policy(void)
137{
1c3d85dd 138 return cpufreq_driver->have_governor_per_policy;
4d5dcc42 139}
3f869d6d 140EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 141
944e9a03
VK
142struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
143{
144 if (have_governor_per_policy())
145 return &policy->kobj;
146 else
147 return cpufreq_global_kobject;
148}
149EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
150
72a4ce34
VK
151static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
152{
153 u64 idle_time;
154 u64 cur_wall_time;
155 u64 busy_time;
156
157 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
158
159 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
160 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
161 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
162 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
163 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
164 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
165
166 idle_time = cur_wall_time - busy_time;
167 if (wall)
168 *wall = cputime_to_usecs(cur_wall_time);
169
170 return cputime_to_usecs(idle_time);
171}
172
173u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
174{
175 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
176
177 if (idle_time == -1ULL)
178 return get_cpu_idle_time_jiffy(cpu, wall);
179 else if (!io_busy)
180 idle_time += get_cpu_iowait_time_us(cpu, wall);
181
182 return idle_time;
183}
184EXPORT_SYMBOL_GPL(get_cpu_idle_time);
185
/*
 * __cpufreq_cpu_get - look up the policy of @cpu and pin it.
 *
 * On success a module reference on the driver is held, and - unless
 * @sysfs is true - a kobject reference on the policy as well; the caller
 * must release them via __cpufreq_cpu_put() with the matching @sysfs flag.
 * Returns NULL if the CPU is invalid, no driver is registered, or no
 * policy exists for the CPU.
 */
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;

	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	/* sysfs callers rely on the kobject reference sysfs itself holds. */
	if (!sysfs && !kobject_get(&data->kobj))
		goto err_out_put_module;

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

	/* Unwind in reverse acquisition order. */
err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
a9144436
SB
222
223struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
224{
d5aaffa9
DB
225 if (cpufreq_disabled())
226 return NULL;
227
a9144436
SB
228 return __cpufreq_cpu_get(cpu, false);
229}
1da177e4
LT
230EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
231
/* As cpufreq_cpu_get(), but for sysfs callers: no kobject ref is taken. */
static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}
236
/*
 * __cpufreq_cpu_put - release the references taken by __cpufreq_cpu_get().
 * @sysfs must match the flag passed to the corresponding get.
 */
static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
	/* sysfs callers never took a kobject reference, so don't drop one. */
	if (!sysfs)
		kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
7d5e350f 243
1da177e4
LT
244void cpufreq_cpu_put(struct cpufreq_policy *data)
245{
d5aaffa9
DB
246 if (cpufreq_disabled())
247 return;
248
a9144436 249 __cpufreq_cpu_put(data, false);
1da177e4
LT
250}
251EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
252
/* Counterpart of cpufreq_cpu_get_sysfs(): drops only the module ref. */
static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, true);
}
1da177e4 257
1da177e4
LT
258/*********************************************************************
259 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
260 *********************************************************************/
261
262/**
263 * adjust_jiffies - adjust the system "loops_per_jiffy"
264 *
265 * This function alters the system "loops_per_jiffy" for the clock
266 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 267 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
268 * per-CPU loops_per_jiffy value wherever possible.
269 */
#ifndef CONFIG_SMP
/* Reference loops_per_jiffy and the frequency it was calibrated at,
 * captured lazily on the first transition seen. */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	/* Drivers with constant loops need no rescaling. */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	/* First call: remember the baseline to scale against later. */
	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	/* Rescale after a real change, or on suspend/resume adjustments. */
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: per-CPU loops_per_jiffy is handled by the arch, nothing to do. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
299
/*
 * __cpufreq_notify_transition - notify a single frequency transition step.
 *
 * Called once with CPUFREQ_PRECHANGE and once with CPUFREQ_POSTCHANGE per
 * affected CPU. Fires the srcu transition notifier chain, keeps
 * loops_per_jiffy in sync via adjust_jiffies(), and updates policy->cur
 * on the POSTCHANGE leg. Must be called with interrupts enabled (the
 * notifier chain may sleep).
 */
void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				/* Trust the core's view of the old freq. */
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* Record the new frequency as current for this policy. */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
bb176f7d 345
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	/* Notify once per CPU covered by this policy; freqs->cpu doubles
	 * as the iteration cursor seen by the notifier callbacks. */
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
361
362
1da177e4
LT
363/*********************************************************************
364 * SYSFS INTERFACE *
365 *********************************************************************/
366
/*
 * __find_governor - look up a registered governor by name
 * (case-insensitive). Returns NULL if no governor matches.
 * NOTE(review): callers in this file take cpufreq_governor_mutex around
 * this walk of cpufreq_governor_list - confirm before adding new callers.
 */
static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
377
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers the string selects a policy constant (*policy);
 * for target drivers it selects a governor object (*governor), loading
 * the "cpufreq_<name>" module on demand if necessary. Returns 0 on
 * success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/* Drop the mutex around request_module(): it may
			 * sleep, and the loaded module registers its
			 * governor under this same mutex. */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4 426
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

/* Generate a sysfs show handler printing one unsigned policy field. */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
448
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 *
 * Generates a store handler: parses one unsigned value, applies it via
 * __cpufreq_set_policy(), and records the user's request in user_policy.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret;						\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
1da177e4
LT
478
479/**
480 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
481 */
905d77cd
DJ
482static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
483 char *buf)
1da177e4 484{
5a01f2e8 485 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
486 if (!cur_freq)
487 return sprintf(buf, "<unknown>");
488 return sprintf(buf, "%u\n", cur_freq);
489}
490
/**
 * show_scaling_governor - show the current policy for the specified CPU
 *
 * setpolicy drivers report the fixed policy name; target drivers report
 * the active governor's name.
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
505
1da177e4
LT
506/**
507 * store_scaling_governor - store policy for the specified CPU
508 */
905d77cd
DJ
509static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
510 const char *buf, size_t count)
1da177e4 511{
f55c9c26 512 unsigned int ret;
1da177e4
LT
513 char str_governor[16];
514 struct cpufreq_policy new_policy;
515
516 ret = cpufreq_get_policy(&new_policy, policy->cpu);
517 if (ret)
518 return ret;
519
29464f28 520 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
521 if (ret != 1)
522 return -EINVAL;
523
e08f5f5b
GS
524 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
525 &new_policy.governor))
1da177e4
LT
526 return -EINVAL;
527
bb176f7d
VK
528 /*
529 * Do not use cpufreq_set_policy here or the user_policy.max
530 * will be wrongly overridden
531 */
7970e08b
TR
532 ret = __cpufreq_set_policy(policy, &new_policy);
533
534 policy->user_policy.policy = policy->policy;
535 policy->user_policy.governor = policy->governor;
7970e08b 536
e08f5f5b
GS
537 if (ret)
538 return ret;
539 else
540 return count;
1da177e4
LT
541}
542
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	/* Name is truncated to CPUFREQ_NAME_PLEN including the newline. */
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
550
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 *
 * setpolicy-only drivers support just the two fixed policies; target
 * drivers list every registered governor, space separated.
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		/* Stop before a name + separator could overflow the page. */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 575
/*
 * show_cpus - format the CPU numbers in @mask into @buf, space separated
 * and newline terminated. Output is clamped to the sysfs page size.
 */
static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		/* Separator before every entry but the first. */
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		/* Leave room for the trailing "\n" and NUL. */
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
591
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->related_cpus, buf);
}
600
/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}
608
9e76988e 609static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 610 const char *buf, size_t count)
9e76988e
VP
611{
612 unsigned int freq = 0;
613 unsigned int ret;
614
879000f9 615 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
616 return -EINVAL;
617
618 ret = sscanf(buf, "%u", &freq);
619 if (ret != 1)
620 return -EINVAL;
621
622 policy->governor->store_setspeed(policy, freq);
623
624 return count;
625}
626
627static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
628{
879000f9 629 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
630 return sprintf(buf, "<unsupported>\n");
631
632 return policy->governor->show_setspeed(policy, buf);
633}
1da177e4 634
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 *
 * Falls back to cpuinfo.max_freq when the driver has no bios_limit hook
 * or the hook fails.
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
649
/* Instantiate the sysfs attribute objects; the _ro/_rw/_ro_perm variants
 * select the file permissions and wire up the show/store handlers above. */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 664
/* Attributes created for every policy kobject by default; drivers may add
 * more via cpufreq_driver->attr. Others (cpuinfo_cur_freq, bios_limit,
 * scaling_cur_freq) are added conditionally in cpufreq_add_dev_interface. */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
679
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

/*
 * show - sysfs read dispatcher for policy attributes.
 *
 * Pins the policy (sysfs variant: no extra kobject ref), takes the
 * per-policy rwsem for reading, and forwards to the attribute's show().
 */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
706
/*
 * store - sysfs write dispatcher for policy attributes.
 *
 * Same pattern as show(), but takes the per-policy rwsem for writing
 * before forwarding to the attribute's store().
 */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
731
/* kobject release callback: wakes whoever waits on kobj_unregister in the
 * policy teardown path once the last sysfs reference is gone. */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
738
/* Route all reads/writes on policy attributes through show()/store(). */
static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

/* kobject type backing each policy's /sys/.../cpufreq directory. */
static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
749
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* Usage count: the global kobject is added to sysfs on first get and
 * removed again when the last user puts it. */
static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);
771
/*
 * cpufreq_sysfs_create_file - create @attr under the global cpufreq
 * kobject, taking a usage reference on it; the reference is dropped
 * again if file creation fails.
 */
int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

/* Counterpart: remove the file and drop the usage reference. */
void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
792
/* symlink affected CPUs */
/*
 * cpufreq_add_dev_symlink - for every CPU in the policy other than @cpu,
 * create a "cpufreq" symlink from that CPU's device directory to the
 * shared policy kobject. A policy reference is taken per link created
 * (held for the lifetime of the link).
 *
 * NOTE(review): cpufreq_cpu_get(cpu) pins the policy via the owning CPU
 * rather than @j, and the ref is only dropped on failure - verify this
 * refcounting is intentional before changing it.
 */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		/* The owning CPU hosts the real directory, not a link. */
		if (j == cpu)
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
819
/*
 * cpufreq_add_dev_interface - create the sysfs interface for a new policy
 * and activate it.
 *
 * Registers the policy kobject under @dev, creates driver-specific and
 * conditional attributes, publishes the policy in the per-cpu tables,
 * links sibling CPUs, and applies the initial policy. On attribute
 * failure the kobject is dropped and its unregister completion awaited.
 */
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	/* cpuinfo_cur_freq only makes sense if the driver can read HW. */
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	/* Publish the policy for every CPU it manages. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
892
#ifdef CONFIG_HOTPLUG_CPU
/*
 * cpufreq_add_policy_cpu - attach hotplugged @cpu to the existing policy
 * of @sibling.
 *
 * The governor is stopped around the cpumask/table update, restarted
 * afterwards, and a sysfs link is created for the new CPU. The policy
 * reference taken via cpufreq_cpu_get(sibling) is kept for the link and
 * only dropped on failure.
 */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	if (has_target)
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	if (has_target) {
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif
1da177e4
LT
932
933/**
934 * cpufreq_add_dev - add a CPU device
935 *
32ee8c3e 936 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
937 *
938 * The Oracle says: try running cpufreq registration/unregistration concurrently
939 * with with cpu hotplugging and all hell will break loose. Tried to clean this
940 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 941 */
8a25a2fd 942static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 943{
fcf80582 944 unsigned int j, cpu = dev->id;
65922465 945 int ret = -ENOMEM;
1da177e4 946 struct cpufreq_policy *policy;
1da177e4 947 unsigned long flags;
90e41bac 948#ifdef CONFIG_HOTPLUG_CPU
fcf80582 949 struct cpufreq_governor *gov;
90e41bac
PB
950 int sibling;
951#endif
1da177e4 952
c32b6b8e
AR
953 if (cpu_is_offline(cpu))
954 return 0;
955
2d06d8c4 956 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
957
958#ifdef CONFIG_SMP
959 /* check whether a different CPU already registered this
960 * CPU because it is in the same boat. */
961 policy = cpufreq_cpu_get(cpu);
962 if (unlikely(policy)) {
8ff69732 963 cpufreq_cpu_put(policy);
1da177e4
LT
964 return 0;
965 }
fcf80582
VK
966
967#ifdef CONFIG_HOTPLUG_CPU
968 /* Check if this cpu was hot-unplugged earlier and has siblings */
0d1857a1 969 read_lock_irqsave(&cpufreq_driver_lock, flags);
fcf80582
VK
970 for_each_online_cpu(sibling) {
971 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
2eaa3e2d 972 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
0d1857a1 973 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 974 return cpufreq_add_policy_cpu(cpu, sibling, dev);
2eaa3e2d 975 }
fcf80582 976 }
0d1857a1 977 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 978#endif
1da177e4
LT
979#endif
980
1c3d85dd 981 if (!try_module_get(cpufreq_driver->owner)) {
1da177e4
LT
982 ret = -EINVAL;
983 goto module_out;
984 }
985
e98df50c 986 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
059019a3 987 if (!policy)
1da177e4 988 goto nomem_out;
059019a3
DJ
989
990 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
3f4a782b 991 goto err_free_policy;
059019a3
DJ
992
993 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
3f4a782b 994 goto err_free_cpumask;
1da177e4
LT
995
996 policy->cpu = cpu;
65922465 997 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
835481d9 998 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 999
5a01f2e8 1000 /* Initially set CPU itself as the policy_cpu */
f1625066 1001 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
5a01f2e8 1002
1da177e4 1003 init_completion(&policy->kobj_unregister);
65f27f38 1004 INIT_WORK(&policy->update, handle_update);
1da177e4
LT
1005
1006 /* call driver. From then on the cpufreq must be able
1007 * to accept all calls to ->verify and ->setpolicy for this CPU
1008 */
1c3d85dd 1009 ret = cpufreq_driver->init(policy);
1da177e4 1010 if (ret) {
2d06d8c4 1011 pr_debug("initialization failed\n");
2eaa3e2d 1012 goto err_set_policy_cpu;
1da177e4 1013 }
643ae6e8 1014
fcf80582
VK
1015 /* related cpus should atleast have policy->cpus */
1016 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1017
643ae6e8
VK
1018 /*
1019 * affected cpus must always be the one, which are online. We aren't
1020 * managing offline cpus here.
1021 */
1022 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1023
187d9f4e
MC
1024 policy->user_policy.min = policy->min;
1025 policy->user_policy.max = policy->max;
1da177e4 1026
a1531acd
TR
1027 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1028 CPUFREQ_START, policy);
1029
fcf80582
VK
1030#ifdef CONFIG_HOTPLUG_CPU
1031 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1032 if (gov) {
1033 policy->governor = gov;
1034 pr_debug("Restoring governor %s for cpu %d\n",
1035 policy->governor->name, cpu);
4bfa042c 1036 }
fcf80582 1037#endif
1da177e4 1038
8a25a2fd 1039 ret = cpufreq_add_dev_interface(cpu, policy, dev);
19d6f7ec
DJ
1040 if (ret)
1041 goto err_out_unregister;
8ff69732 1042
038c5b3e 1043 kobject_uevent(&policy->kobj, KOBJ_ADD);
1c3d85dd 1044 module_put(cpufreq_driver->owner);
2d06d8c4 1045 pr_debug("initialization complete\n");
87c32271 1046
1da177e4
LT
1047 return 0;
1048
1da177e4 1049err_out_unregister:
0d1857a1 1050 write_lock_irqsave(&cpufreq_driver_lock, flags);
835481d9 1051 for_each_cpu(j, policy->cpus)
7a6aedfa 1052 per_cpu(cpufreq_cpu_data, j) = NULL;
0d1857a1 1053 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1054
c10997f6 1055 kobject_put(&policy->kobj);
1da177e4
LT
1056 wait_for_completion(&policy->kobj_unregister);
1057
2eaa3e2d
VK
1058err_set_policy_cpu:
1059 per_cpu(cpufreq_policy_cpu, cpu) = -1;
cad70a6a 1060 free_cpumask_var(policy->related_cpus);
3f4a782b
MD
1061err_free_cpumask:
1062 free_cpumask_var(policy->cpus);
1063err_free_policy:
1da177e4 1064 kfree(policy);
1da177e4 1065nomem_out:
1c3d85dd 1066 module_put(cpufreq_driver->owner);
c32b6b8e 1067module_out:
1da177e4
LT
1068 return ret;
1069}
1070
b8eed8af
VK
1071static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1072{
1073 int j;
1074
1075 policy->last_cpu = policy->cpu;
1076 policy->cpu = cpu;
1077
3361b7b1 1078 for_each_cpu(j, policy->cpus)
b8eed8af 1079 per_cpu(cpufreq_policy_cpu, j) = cpu;
b8eed8af
VK
1080
1081#ifdef CONFIG_CPU_FREQ_TABLE
1082 cpufreq_frequency_table_update_policy_cpu(policy);
1083#endif
1084 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1085 CPUFREQ_UPDATE_POLICY_CPU, policy);
1086}
1da177e4
LT
1087
1088/**
5a01f2e8 1089 * __cpufreq_remove_dev - remove a CPU device
1da177e4
LT
1090 *
1091 * Removes the cpufreq interface for a CPU device.
5a01f2e8
VP
1092 * Caller should already have policy_rwsem in write mode for this CPU.
1093 * This routine frees the rwsem before returning.
1da177e4 1094 */
bb176f7d
VK
1095static int __cpufreq_remove_dev(struct device *dev,
1096 struct subsys_interface *sif)
1da177e4 1097{
b8eed8af 1098 unsigned int cpu = dev->id, ret, cpus;
1da177e4
LT
1099 unsigned long flags;
1100 struct cpufreq_policy *data;
499bca9b
AW
1101 struct kobject *kobj;
1102 struct completion *cmp;
8a25a2fd 1103 struct device *cpu_dev;
1da177e4 1104
b8eed8af 1105 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1da177e4 1106
0d1857a1 1107 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 1108
7a6aedfa 1109 data = per_cpu(cpufreq_cpu_data, cpu);
2eaa3e2d
VK
1110 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1111
0d1857a1 1112 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1113
1114 if (!data) {
b8eed8af 1115 pr_debug("%s: No cpu_data found\n", __func__);
1da177e4
LT
1116 return -EINVAL;
1117 }
1da177e4 1118
1c3d85dd 1119 if (cpufreq_driver->target)
f6a7409c 1120 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1da177e4 1121
084f3493 1122#ifdef CONFIG_HOTPLUG_CPU
1c3d85dd 1123 if (!cpufreq_driver->setpolicy)
fa69e33f
DB
1124 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1125 data->governor->name, CPUFREQ_NAME_LEN);
1da177e4
LT
1126#endif
1127
2eaa3e2d 1128 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af 1129 cpus = cpumask_weight(data->cpus);
e4969eba
VK
1130
1131 if (cpus > 1)
1132 cpumask_clear_cpu(cpu, data->cpus);
2eaa3e2d 1133 unlock_policy_rwsem_write(cpu);
084f3493 1134
73bf0fc2
VK
1135 if (cpu != data->cpu) {
1136 sysfs_remove_link(&dev->kobj, "cpufreq");
1137 } else if (cpus > 1) {
b8eed8af
VK
1138 /* first sibling now owns the new sysfs dir */
1139 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1140 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1141 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1142 if (ret) {
1143 pr_err("%s: Failed to move kobj: %d", __func__, ret);
084f3493 1144
2eaa3e2d 1145 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af 1146 cpumask_set_cpu(cpu, data->cpus);
1da177e4 1147
0d1857a1 1148 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 1149 per_cpu(cpufreq_cpu_data, cpu) = data;
0d1857a1 1150 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1151
499bca9b 1152 unlock_policy_rwsem_write(cpu);
1da177e4 1153
2eaa3e2d
VK
1154 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1155 "cpufreq");
b8eed8af 1156 return -EINVAL;
1da177e4 1157 }
5a01f2e8 1158
2eaa3e2d 1159 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af 1160 update_policy_cpu(data, cpu_dev->id);
2eaa3e2d 1161 unlock_policy_rwsem_write(cpu);
b8eed8af
VK
1162 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1163 __func__, cpu_dev->id, cpu);
1da177e4 1164 }
1da177e4 1165
d96038e0
VK
1166 if ((cpus == 1) && (cpufreq_driver->target))
1167 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1168
b8eed8af
VK
1169 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1170 cpufreq_cpu_put(data);
1da177e4 1171
b8eed8af
VK
1172 /* If cpu is last user of policy, free policy */
1173 if (cpus == 1) {
2eaa3e2d 1174 lock_policy_rwsem_read(cpu);
b8eed8af
VK
1175 kobj = &data->kobj;
1176 cmp = &data->kobj_unregister;
2eaa3e2d 1177 unlock_policy_rwsem_read(cpu);
b8eed8af 1178 kobject_put(kobj);
7d26e2d5 1179
b8eed8af
VK
1180 /* we need to make sure that the underlying kobj is actually
1181 * not referenced anymore by anybody before we proceed with
1182 * unloading.
1183 */
1184 pr_debug("waiting for dropping of refcount\n");
1185 wait_for_completion(cmp);
1186 pr_debug("wait complete\n");
7d26e2d5 1187
1c3d85dd
RW
1188 if (cpufreq_driver->exit)
1189 cpufreq_driver->exit(data);
27ecddc2 1190
b8eed8af
VK
1191 free_cpumask_var(data->related_cpus);
1192 free_cpumask_var(data->cpus);
1193 kfree(data);
1c3d85dd 1194 } else if (cpufreq_driver->target) {
b8eed8af
VK
1195 __cpufreq_governor(data, CPUFREQ_GOV_START);
1196 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
27ecddc2 1197 }
1da177e4 1198
2eaa3e2d 1199 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1da177e4
LT
1200 return 0;
1201}
1202
8a25a2fd 1203static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1204{
8a25a2fd 1205 unsigned int cpu = dev->id;
5a01f2e8 1206 int retval;
ec28297a
VP
1207
1208 if (cpu_is_offline(cpu))
1209 return 0;
1210
8a25a2fd 1211 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1212 return retval;
1213}
1214
65f27f38 1215static void handle_update(struct work_struct *work)
1da177e4 1216{
65f27f38
DH
1217 struct cpufreq_policy *policy =
1218 container_of(work, struct cpufreq_policy, update);
1219 unsigned int cpu = policy->cpu;
2d06d8c4 1220 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1221 cpufreq_update_policy(cpu);
1222}
1223
1224/**
bb176f7d
VK
1225 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1226 * in deep trouble.
1da177e4
LT
1227 * @cpu: cpu number
1228 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1229 * @new_freq: CPU frequency the CPU actually runs at
1230 *
29464f28
DJ
1231 * We adjust to current frequency first, and need to clean up later.
1232 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1233 */
e08f5f5b
GS
1234static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1235 unsigned int new_freq)
1da177e4 1236{
b43a7ffb 1237 struct cpufreq_policy *policy;
1da177e4 1238 struct cpufreq_freqs freqs;
b43a7ffb
VK
1239 unsigned long flags;
1240
2d06d8c4 1241 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1242 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1243
1da177e4
LT
1244 freqs.old = old_freq;
1245 freqs.new = new_freq;
b43a7ffb
VK
1246
1247 read_lock_irqsave(&cpufreq_driver_lock, flags);
1248 policy = per_cpu(cpufreq_cpu_data, cpu);
1249 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1250
1251 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1252 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1da177e4
LT
1253}
1254
32ee8c3e 1255/**
4ab70df4 1256 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1257 * @cpu: CPU number
1258 *
1259 * This is the last known freq, without actually getting it from the driver.
1260 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1261 */
1262unsigned int cpufreq_quick_get(unsigned int cpu)
1263{
9e21ba8b 1264 struct cpufreq_policy *policy;
e08f5f5b 1265 unsigned int ret_freq = 0;
95235ca2 1266
1c3d85dd
RW
1267 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1268 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1269
1270 policy = cpufreq_cpu_get(cpu);
95235ca2 1271 if (policy) {
e08f5f5b 1272 ret_freq = policy->cur;
95235ca2
VP
1273 cpufreq_cpu_put(policy);
1274 }
1275
4d34a67d 1276 return ret_freq;
95235ca2
VP
1277}
1278EXPORT_SYMBOL(cpufreq_quick_get);
1279
3d737108
JB
1280/**
1281 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1282 * @cpu: CPU number
1283 *
1284 * Just return the max possible frequency for a given CPU.
1285 */
1286unsigned int cpufreq_quick_get_max(unsigned int cpu)
1287{
1288 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1289 unsigned int ret_freq = 0;
1290
1291 if (policy) {
1292 ret_freq = policy->max;
1293 cpufreq_cpu_put(policy);
1294 }
1295
1296 return ret_freq;
1297}
1298EXPORT_SYMBOL(cpufreq_quick_get_max);
1299
5a01f2e8 1300static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1301{
7a6aedfa 1302 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1303 unsigned int ret_freq = 0;
5800043b 1304
1c3d85dd 1305 if (!cpufreq_driver->get)
4d34a67d 1306 return ret_freq;
1da177e4 1307
1c3d85dd 1308 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1309
e08f5f5b 1310 if (ret_freq && policy->cur &&
1c3d85dd 1311 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e08f5f5b
GS
1312 /* verify no discrepancy between actual and
1313 saved value exists */
1314 if (unlikely(ret_freq != policy->cur)) {
1315 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1316 schedule_work(&policy->update);
1317 }
1318 }
1319
4d34a67d 1320 return ret_freq;
5a01f2e8 1321}
1da177e4 1322
5a01f2e8
VP
1323/**
1324 * cpufreq_get - get the current CPU frequency (in kHz)
1325 * @cpu: CPU number
1326 *
1327 * Get the CPU current (static) CPU frequency
1328 */
1329unsigned int cpufreq_get(unsigned int cpu)
1330{
1331 unsigned int ret_freq = 0;
1332 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1333
1334 if (!policy)
1335 goto out;
1336
1337 if (unlikely(lock_policy_rwsem_read(cpu)))
1338 goto out_policy;
1339
1340 ret_freq = __cpufreq_get(cpu);
1341
1342 unlock_policy_rwsem_read(cpu);
1da177e4 1343
5a01f2e8
VP
1344out_policy:
1345 cpufreq_cpu_put(policy);
1346out:
4d34a67d 1347 return ret_freq;
1da177e4
LT
1348}
1349EXPORT_SYMBOL(cpufreq_get);
1350
8a25a2fd
KS
1351static struct subsys_interface cpufreq_interface = {
1352 .name = "cpufreq",
1353 .subsys = &cpu_subsys,
1354 .add_dev = cpufreq_add_dev,
1355 .remove_dev = cpufreq_remove_dev,
e00e56df
RW
1356};
1357
42d4dc3f 1358/**
e00e56df
RW
1359 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1360 *
1361 * This function is only executed for the boot processor. The other CPUs
1362 * have been put offline by means of CPU hotplug.
42d4dc3f 1363 */
e00e56df 1364static int cpufreq_bp_suspend(void)
42d4dc3f 1365{
e08f5f5b 1366 int ret = 0;
4bc5d341 1367
e00e56df 1368 int cpu = smp_processor_id();
42d4dc3f
BH
1369 struct cpufreq_policy *cpu_policy;
1370
2d06d8c4 1371 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1372
e00e56df 1373 /* If there's no policy for the boot CPU, we have nothing to do. */
42d4dc3f
BH
1374 cpu_policy = cpufreq_cpu_get(cpu);
1375 if (!cpu_policy)
e00e56df 1376 return 0;
42d4dc3f 1377
1c3d85dd
RW
1378 if (cpufreq_driver->suspend) {
1379 ret = cpufreq_driver->suspend(cpu_policy);
ce6c3997 1380 if (ret)
42d4dc3f
BH
1381 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1382 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1383 }
1384
42d4dc3f 1385 cpufreq_cpu_put(cpu_policy);
c9060494 1386 return ret;
42d4dc3f
BH
1387}
1388
1da177e4 1389/**
e00e56df 1390 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1391 *
1392 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1393 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1394 * restored. It will verify that the current freq is in sync with
1395 * what we believe it to be. This is a bit later than when it
1396 * should be, but nonethteless it's better than calling
1397 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1398 *
1399 * This function is only executed for the boot CPU. The other CPUs have not
1400 * been turned on yet.
1da177e4 1401 */
e00e56df 1402static void cpufreq_bp_resume(void)
1da177e4 1403{
e08f5f5b 1404 int ret = 0;
4bc5d341 1405
e00e56df 1406 int cpu = smp_processor_id();
1da177e4
LT
1407 struct cpufreq_policy *cpu_policy;
1408
2d06d8c4 1409 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1410
e00e56df 1411 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1412 cpu_policy = cpufreq_cpu_get(cpu);
1413 if (!cpu_policy)
e00e56df 1414 return;
1da177e4 1415
1c3d85dd
RW
1416 if (cpufreq_driver->resume) {
1417 ret = cpufreq_driver->resume(cpu_policy);
1da177e4
LT
1418 if (ret) {
1419 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1420 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1421 goto fail;
1da177e4
LT
1422 }
1423 }
1424
1da177e4 1425 schedule_work(&cpu_policy->update);
ce6c3997 1426
c9060494 1427fail:
1da177e4 1428 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1429}
1430
e00e56df
RW
1431static struct syscore_ops cpufreq_syscore_ops = {
1432 .suspend = cpufreq_bp_suspend,
1433 .resume = cpufreq_bp_resume,
1da177e4
LT
1434};
1435
9d95046e
BP
1436/**
1437 * cpufreq_get_current_driver - return current driver's name
1438 *
1439 * Return the name string of the currently loaded cpufreq driver
1440 * or NULL, if none.
1441 */
1442const char *cpufreq_get_current_driver(void)
1443{
1c3d85dd
RW
1444 if (cpufreq_driver)
1445 return cpufreq_driver->name;
1446
1447 return NULL;
9d95046e
BP
1448}
1449EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1450
1451/*********************************************************************
1452 * NOTIFIER LISTS INTERFACE *
1453 *********************************************************************/
1454
1455/**
1456 * cpufreq_register_notifier - register a driver with cpufreq
1457 * @nb: notifier function to register
1458 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1459 *
32ee8c3e 1460 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1461 * are notified about clock rate changes (once before and once after
1462 * the transition), or a list of drivers that are notified about
1463 * changes in cpufreq policy.
1464 *
1465 * This function may sleep, and has the same return conditions as
e041c683 1466 * blocking_notifier_chain_register.
1da177e4
LT
1467 */
1468int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1469{
1470 int ret;
1471
d5aaffa9
DB
1472 if (cpufreq_disabled())
1473 return -EINVAL;
1474
74212ca4
CEB
1475 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1476
1da177e4
LT
1477 switch (list) {
1478 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1479 ret = srcu_notifier_chain_register(
e041c683 1480 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1481 break;
1482 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1483 ret = blocking_notifier_chain_register(
1484 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1485 break;
1486 default:
1487 ret = -EINVAL;
1488 }
1da177e4
LT
1489
1490 return ret;
1491}
1492EXPORT_SYMBOL(cpufreq_register_notifier);
1493
1da177e4
LT
1494/**
1495 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1496 * @nb: notifier block to be unregistered
bb176f7d 1497 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1498 *
1499 * Remove a driver from the CPU frequency notifier list.
1500 *
1501 * This function may sleep, and has the same return conditions as
e041c683 1502 * blocking_notifier_chain_unregister.
1da177e4
LT
1503 */
1504int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1505{
1506 int ret;
1507
d5aaffa9
DB
1508 if (cpufreq_disabled())
1509 return -EINVAL;
1510
1da177e4
LT
1511 switch (list) {
1512 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1513 ret = srcu_notifier_chain_unregister(
e041c683 1514 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1515 break;
1516 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1517 ret = blocking_notifier_chain_unregister(
1518 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1519 break;
1520 default:
1521 ret = -EINVAL;
1522 }
1da177e4
LT
1523
1524 return ret;
1525}
1526EXPORT_SYMBOL(cpufreq_unregister_notifier);
1527
1528
1529/*********************************************************************
1530 * GOVERNORS *
1531 *********************************************************************/
1532
1da177e4
LT
1533int __cpufreq_driver_target(struct cpufreq_policy *policy,
1534 unsigned int target_freq,
1535 unsigned int relation)
1536{
1537 int retval = -EINVAL;
7249924e 1538 unsigned int old_target_freq = target_freq;
c32b6b8e 1539
a7b422cd
KRW
1540 if (cpufreq_disabled())
1541 return -ENODEV;
1542
7249924e
VK
1543 /* Make sure that target_freq is within supported range */
1544 if (target_freq > policy->max)
1545 target_freq = policy->max;
1546 if (target_freq < policy->min)
1547 target_freq = policy->min;
1548
1549 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1550 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1551
1552 if (target_freq == policy->cur)
1553 return 0;
1554
1c3d85dd
RW
1555 if (cpufreq_driver->target)
1556 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1557
1da177e4
LT
1558 return retval;
1559}
1560EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1561
1da177e4
LT
1562int cpufreq_driver_target(struct cpufreq_policy *policy,
1563 unsigned int target_freq,
1564 unsigned int relation)
1565{
f1829e4a 1566 int ret = -EINVAL;
1da177e4 1567
5a01f2e8 1568 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1569 goto fail;
1da177e4
LT
1570
1571 ret = __cpufreq_driver_target(policy, target_freq, relation);
1572
5a01f2e8 1573 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1574
f1829e4a 1575fail:
1da177e4
LT
1576 return ret;
1577}
1578EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1579
bf0b90e3 1580int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62 1581{
d5aaffa9 1582 if (cpufreq_disabled())
a262e94c 1583 return 0;
d5aaffa9 1584
1c3d85dd 1585 if (!cpufreq_driver->getavg)
0676f7f2
VK
1586 return 0;
1587
a262e94c 1588 return cpufreq_driver->getavg(policy, cpu);
dfde5d62 1589}
5a01f2e8 1590EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1591
153d7f3f 1592/*
153d7f3f
AV
1593 * when "event" is CPUFREQ_GOV_LIMITS
1594 */
1da177e4 1595
e08f5f5b
GS
1596static int __cpufreq_governor(struct cpufreq_policy *policy,
1597 unsigned int event)
1da177e4 1598{
cc993cab 1599 int ret;
6afde10c
TR
1600
1601 /* Only must be defined when default governor is known to have latency
1602 restrictions, like e.g. conservative or ondemand.
1603 That this is the case is already ensured in Kconfig
1604 */
1605#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1606 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1607#else
1608 struct cpufreq_governor *gov = NULL;
1609#endif
1c256245
TR
1610
1611 if (policy->governor->max_transition_latency &&
1612 policy->cpuinfo.transition_latency >
1613 policy->governor->max_transition_latency) {
6afde10c
TR
1614 if (!gov)
1615 return -EINVAL;
1616 else {
1617 printk(KERN_WARNING "%s governor failed, too long"
1618 " transition latency of HW, fallback"
1619 " to %s governor\n",
1620 policy->governor->name,
1621 gov->name);
1622 policy->governor = gov;
1623 }
1c256245 1624 }
1da177e4
LT
1625
1626 if (!try_module_get(policy->governor->owner))
1627 return -EINVAL;
1628
2d06d8c4 1629 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e08f5f5b 1630 policy->cpu, event);
95731ebb
XC
1631
1632 mutex_lock(&cpufreq_governor_lock);
1633 if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1634 (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1635 mutex_unlock(&cpufreq_governor_lock);
1636 return -EBUSY;
1637 }
1638
1639 if (event == CPUFREQ_GOV_STOP)
1640 policy->governor_enabled = false;
1641 else if (event == CPUFREQ_GOV_START)
1642 policy->governor_enabled = true;
1643
1644 mutex_unlock(&cpufreq_governor_lock);
1645
1da177e4
LT
1646 ret = policy->governor->governor(policy, event);
1647
4d5dcc42
VK
1648 if (!ret) {
1649 if (event == CPUFREQ_GOV_POLICY_INIT)
1650 policy->governor->initialized++;
1651 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1652 policy->governor->initialized--;
95731ebb
XC
1653 } else {
1654 /* Restore original values */
1655 mutex_lock(&cpufreq_governor_lock);
1656 if (event == CPUFREQ_GOV_STOP)
1657 policy->governor_enabled = true;
1658 else if (event == CPUFREQ_GOV_START)
1659 policy->governor_enabled = false;
1660 mutex_unlock(&cpufreq_governor_lock);
4d5dcc42 1661 }
b394058f 1662
e08f5f5b
GS
1663 /* we keep one module reference alive for
1664 each CPU governed by this CPU */
1da177e4
LT
1665 if ((event != CPUFREQ_GOV_START) || ret)
1666 module_put(policy->governor->owner);
1667 if ((event == CPUFREQ_GOV_STOP) && !ret)
1668 module_put(policy->governor->owner);
1669
1670 return ret;
1671}
1672
1da177e4
LT
1673int cpufreq_register_governor(struct cpufreq_governor *governor)
1674{
3bcb09a3 1675 int err;
1da177e4
LT
1676
1677 if (!governor)
1678 return -EINVAL;
1679
a7b422cd
KRW
1680 if (cpufreq_disabled())
1681 return -ENODEV;
1682
3fc54d37 1683 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1684
b394058f 1685 governor->initialized = 0;
3bcb09a3
JF
1686 err = -EBUSY;
1687 if (__find_governor(governor->name) == NULL) {
1688 err = 0;
1689 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1690 }
1da177e4 1691
32ee8c3e 1692 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1693 return err;
1da177e4
LT
1694}
1695EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1696
1da177e4
LT
1697void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1698{
90e41bac
PB
1699#ifdef CONFIG_HOTPLUG_CPU
1700 int cpu;
1701#endif
1702
1da177e4
LT
1703 if (!governor)
1704 return;
1705
a7b422cd
KRW
1706 if (cpufreq_disabled())
1707 return;
1708
90e41bac
PB
1709#ifdef CONFIG_HOTPLUG_CPU
1710 for_each_present_cpu(cpu) {
1711 if (cpu_online(cpu))
1712 continue;
1713 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1714 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1715 }
1716#endif
1717
3fc54d37 1718 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1719 list_del(&governor->governor_list);
3fc54d37 1720 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1721 return;
1722}
1723EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1724
1725
1da177e4
LT
1726/*********************************************************************
1727 * POLICY INTERFACE *
1728 *********************************************************************/
1729
1730/**
1731 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1732 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1733 * is written
1da177e4
LT
1734 *
1735 * Reads the current cpufreq policy.
1736 */
1737int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1738{
1739 struct cpufreq_policy *cpu_policy;
1740 if (!policy)
1741 return -EINVAL;
1742
1743 cpu_policy = cpufreq_cpu_get(cpu);
1744 if (!cpu_policy)
1745 return -EINVAL;
1746
1da177e4 1747 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1748
1749 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1750 return 0;
1751}
1752EXPORT_SYMBOL(cpufreq_get_policy);
1753
153d7f3f 1754/*
e08f5f5b
GS
1755 * data : current policy.
1756 * policy : policy to be set.
153d7f3f 1757 */
e08f5f5b
GS
1758static int __cpufreq_set_policy(struct cpufreq_policy *data,
1759 struct cpufreq_policy *policy)
1da177e4 1760{
7bd353a9 1761 int ret = 0, failed = 1;
1da177e4 1762
2d06d8c4 1763 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1da177e4
LT
1764 policy->min, policy->max);
1765
e08f5f5b
GS
1766 memcpy(&policy->cpuinfo, &data->cpuinfo,
1767 sizeof(struct cpufreq_cpuinfo));
1da177e4 1768
53391fa2 1769 if (policy->min > data->max || policy->max < data->min) {
9c9a43ed
MD
1770 ret = -EINVAL;
1771 goto error_out;
1772 }
1773
1da177e4 1774 /* verify the cpu speed can be set within this limit */
1c3d85dd 1775 ret = cpufreq_driver->verify(policy);
1da177e4
LT
1776 if (ret)
1777 goto error_out;
1778
1da177e4 1779 /* adjust if necessary - all reasons */
e041c683
AS
1780 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1781 CPUFREQ_ADJUST, policy);
1da177e4
LT
1782
1783 /* adjust if necessary - hardware incompatibility*/
e041c683
AS
1784 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1785 CPUFREQ_INCOMPATIBLE, policy);
1da177e4 1786
bb176f7d
VK
1787 /*
1788 * verify the cpu speed can be set within this limit, which might be
1789 * different to the first one
1790 */
1c3d85dd 1791 ret = cpufreq_driver->verify(policy);
e041c683 1792 if (ret)
1da177e4 1793 goto error_out;
1da177e4
LT
1794
1795 /* notification of the new policy */
e041c683
AS
1796 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1797 CPUFREQ_NOTIFY, policy);
1da177e4 1798
7d5e350f
DJ
1799 data->min = policy->min;
1800 data->max = policy->max;
1da177e4 1801
2d06d8c4 1802 pr_debug("new min and max freqs are %u - %u kHz\n",
e08f5f5b 1803 data->min, data->max);
1da177e4 1804
1c3d85dd 1805 if (cpufreq_driver->setpolicy) {
1da177e4 1806 data->policy = policy->policy;
2d06d8c4 1807 pr_debug("setting range\n");
1c3d85dd 1808 ret = cpufreq_driver->setpolicy(policy);
1da177e4
LT
1809 } else {
1810 if (policy->governor != data->governor) {
1811 /* save old, working values */
1812 struct cpufreq_governor *old_gov = data->governor;
1813
2d06d8c4 1814 pr_debug("governor switch\n");
1da177e4
LT
1815
1816 /* end old governor */
7bd353a9 1817 if (data->governor) {
1da177e4 1818 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
955ef483 1819 unlock_policy_rwsem_write(policy->cpu);
7bd353a9
VK
1820 __cpufreq_governor(data,
1821 CPUFREQ_GOV_POLICY_EXIT);
955ef483 1822 lock_policy_rwsem_write(policy->cpu);
7bd353a9 1823 }
1da177e4
LT
1824
1825 /* start new governor */
1826 data->governor = policy->governor;
7bd353a9 1827 if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
955ef483 1828 if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
7bd353a9 1829 failed = 0;
955ef483
VK
1830 } else {
1831 unlock_policy_rwsem_write(policy->cpu);
7bd353a9
VK
1832 __cpufreq_governor(data,
1833 CPUFREQ_GOV_POLICY_EXIT);
955ef483
VK
1834 lock_policy_rwsem_write(policy->cpu);
1835 }
7bd353a9
VK
1836 }
1837
1838 if (failed) {
1da177e4 1839 /* new governor failed, so re-start old one */
2d06d8c4 1840 pr_debug("starting governor %s failed\n",
e08f5f5b 1841 data->governor->name);
1da177e4
LT
1842 if (old_gov) {
1843 data->governor = old_gov;
7bd353a9
VK
1844 __cpufreq_governor(data,
1845 CPUFREQ_GOV_POLICY_INIT);
e08f5f5b
GS
1846 __cpufreq_governor(data,
1847 CPUFREQ_GOV_START);
1da177e4
LT
1848 }
1849 ret = -EINVAL;
1850 goto error_out;
1851 }
1852 /* might be a policy change, too, so fall through */
1853 }
2d06d8c4 1854 pr_debug("governor: change or update limits\n");
1da177e4
LT
1855 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1856 }
1857
7d5e350f 1858error_out:
1da177e4
LT
1859 return ret;
1860}
1861
1da177e4
LT
1862/**
1863 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1864 * @cpu: CPU which shall be re-evaluated
1865 *
25985edc 1866 * Useful for policy notifiers which have different necessities
1da177e4
LT
1867 * at different times.
1868 */
1869int cpufreq_update_policy(unsigned int cpu)
1870{
1871 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1872 struct cpufreq_policy policy;
f1829e4a 1873 int ret;
1da177e4 1874
f1829e4a
JL
1875 if (!data) {
1876 ret = -ENODEV;
1877 goto no_policy;
1878 }
1da177e4 1879
f1829e4a
JL
1880 if (unlikely(lock_policy_rwsem_write(cpu))) {
1881 ret = -EINVAL;
1882 goto fail;
1883 }
1da177e4 1884
2d06d8c4 1885 pr_debug("updating policy for CPU %u\n", cpu);
7d5e350f 1886 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1da177e4
LT
1887 policy.min = data->user_policy.min;
1888 policy.max = data->user_policy.max;
1889 policy.policy = data->user_policy.policy;
1890 policy.governor = data->user_policy.governor;
1891
bb176f7d
VK
1892 /*
1893 * BIOS might change freq behind our back
1894 * -> ask driver for current freq and notify governors about a change
1895 */
1c3d85dd
RW
1896 if (cpufreq_driver->get) {
1897 policy.cur = cpufreq_driver->get(cpu);
a85f7bd3 1898 if (!data->cur) {
2d06d8c4 1899 pr_debug("Driver did not initialize current freq");
a85f7bd3
TR
1900 data->cur = policy.cur;
1901 } else {
1c3d85dd 1902 if (data->cur != policy.cur && cpufreq_driver->target)
e08f5f5b
GS
1903 cpufreq_out_of_sync(cpu, data->cur,
1904 policy.cur);
a85f7bd3 1905 }
0961dd0d
TR
1906 }
1907
1da177e4
LT
1908 ret = __cpufreq_set_policy(data, &policy);
1909
5a01f2e8
VP
1910 unlock_policy_rwsem_write(cpu);
1911
f1829e4a 1912fail:
1da177e4 1913 cpufreq_cpu_put(data);
f1829e4a 1914no_policy:
1da177e4
LT
1915 return ret;
1916}
1917EXPORT_SYMBOL(cpufreq_update_policy);
1918
dd184a01 1919static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1920 unsigned long action, void *hcpu)
1921{
1922 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1923 struct device *dev;
c32b6b8e 1924
8a25a2fd
KS
1925 dev = get_cpu_device(cpu);
1926 if (dev) {
c32b6b8e
AR
1927 switch (action) {
1928 case CPU_ONLINE:
8a25a2fd 1929 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1930 break;
1931 case CPU_DOWN_PREPARE:
a66b2e50 1932 case CPU_UP_CANCELED_FROZEN:
8a25a2fd 1933 __cpufreq_remove_dev(dev, NULL);
c32b6b8e 1934 break;
5a01f2e8 1935 case CPU_DOWN_FAILED:
8a25a2fd 1936 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1937 break;
1938 }
1939 }
1940 return NOTIFY_OK;
1941}
1942
/* Hotplug notifier so cpufreq interfaces follow CPUs going on/offline. */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
1946
1947/*********************************************************************
1948 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1949 *********************************************************************/
1950
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* A driver must supply ->verify and ->init plus at least one of
	 * ->setpolicy or ->target. */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* setpolicy-style drivers keep frequency constant between notifies */
	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* Claim the single global driver slot under the writer lock. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	/* Release the global driver slot on failure. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2022
1da177e4
LT
2023/**
2024 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2025 *
bb176f7d 2026 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
2027 * the right to do so, i.e. if you have succeeded in initialising before!
2028 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2029 * currently not initialised.
2030 */
221dee28 2031int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
2032{
2033 unsigned long flags;
2034
1c3d85dd 2035 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 2036 return -EINVAL;
1da177e4 2037
2d06d8c4 2038 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 2039
8a25a2fd 2040 subsys_interface_unregister(&cpufreq_interface);
65edc68c 2041 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 2042
0d1857a1 2043 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2044 cpufreq_driver = NULL;
0d1857a1 2045 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
2046
2047 return 0;
2048}
2049EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2050
2051static int __init cpufreq_core_init(void)
2052{
2053 int cpu;
2054
a7b422cd
KRW
2055 if (cpufreq_disabled())
2056 return -ENODEV;
2057
5a01f2e8 2058 for_each_possible_cpu(cpu) {
f1625066 2059 per_cpu(cpufreq_policy_cpu, cpu) = -1;
5a01f2e8
VP
2060 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2061 }
8aa84ad8 2062
2361be23 2063 cpufreq_global_kobject = kobject_create();
8aa84ad8 2064 BUG_ON(!cpufreq_global_kobject);
e00e56df 2065 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 2066
5a01f2e8
VP
2067 return 0;
2068}
5a01f2e8 2069core_initcall(cpufreq_core_init);
This page took 0.732557 seconds and 5 git commands to generate.