cpufreq: Perform light-weight init/teardown during suspend/resume
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
bb176f7d 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
1da177e4 7 *
c32b6b8e 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 9 * Added handling for CPU hotplug
8ff69732
DJ
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 12 *
1da177e4
LT
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
1da177e4
LT
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
72a4ce34 20#include <asm/cputime.h>
1da177e4 21#include <linux/kernel.h>
72a4ce34 22#include <linux/kernel_stat.h>
1da177e4
LT
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/notifier.h>
26#include <linux/cpufreq.h>
27#include <linux/delay.h>
28#include <linux/interrupt.h>
29#include <linux/spinlock.h>
72a4ce34 30#include <linux/tick.h>
1da177e4
LT
31#include <linux/device.h>
32#include <linux/slab.h>
33#include <linux/cpu.h>
34#include <linux/completion.h>
3fc54d37 35#include <linux/mutex.h>
e00e56df 36#include <linux/syscore_ops.h>
1da177e4 37
6f4f2723
TR
38#include <trace/events/power.h>
39
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
/* Policies parked here during light-weight teardown (suspend) and fetched
 * back by cpufreq_policy_restore() on the light-weight init path. */
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
static DEFINE_MUTEX(cpufreq_governor_lock);

#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
1da177e4 55
5a01f2e8
VP
/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
/* Maps each CPU to the CPU owning its policy (and hence its rwsem); the
 * BUG_ON below treats -1 as "no owning policy set". */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

/* Generate lock_policy_rwsem_read()/lock_policy_rwsem_write(): resolve
 * the owning policy CPU for @cpu and take its rwsem in the given mode. */
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);

/* Matching unlockers for the lockers generated above. */
#define unlock_policy_rwsem(mode, cpu)					\
static void unlock_policy_rwsem_##mode(int cpu)			\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
5a01f2e8 99
1da177e4 100/* internal prototypes */
29464f28
DJ
101static int __cpufreq_governor(struct cpufreq_policy *policy,
102 unsigned int event);
5a01f2e8 103static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 104static void handle_update(struct work_struct *work);
1da177e4
LT
105
106/**
32ee8c3e
DJ
107 * Two notifier lists: the "policy" list is involved in the
108 * validation process for a new CPU frequency policy; the
1da177e4
LT
109 * "transition" list for kernel code that needs to handle
110 * changes to devices when the CPU clock speed changes.
111 * The mutex locks both lists.
112 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

/* Set once the SRCU head above is initialized; registration of transition
 * notifiers checks this flag. */
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 124
/* Global kill switch: once disable_cpufreq() has been called,
 * cpufreq_disabled() is non-zero and the public entry points in this
 * file bail out early. */
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}

/* List of registered governors and the mutex serializing access to it. */
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 136
4d5dcc42
VK
/*
 * have_governor_per_policy - true when the active driver keeps a separate
 * set of governor tunables per policy rather than one global set.
 */
bool have_governor_per_policy(void)
{
	return cpufreq_driver->have_governor_per_policy;
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

/*
 * get_governor_parent_kobj - parent kobject for governor attributes:
 * the policy's own kobject when governors are per-policy, otherwise the
 * global cpufreq kobject.
 */
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
151
72a4ce34
VK
/*
 * get_cpu_idle_time_jiffy - idle time for @cpu derived from the jiffy
 * based cpustat accounting: wall time minus all busy categories.
 * Returns microseconds; also reports wall time via @wall when non-NULL.
 */
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}

/*
 * get_cpu_idle_time - idle time for @cpu in microseconds.
 *
 * Prefers get_cpu_idle_time_us(); falls back to jiffy accounting when
 * that returns -1ULL. When @io_busy is false, iowait time is added to
 * the idle total as well.
 */
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
186
/*
 * __cpufreq_cpu_get - look up and pin the policy for @cpu.
 * @cpu: CPU number
 * @sysfs: true on the sysfs show/store path, where the caller already
 *         holds a kobject reference, so only the driver module is pinned.
 *
 * Takes a reference on the driver module and (unless @sysfs) on the
 * policy kobject, all under the driver_lock read lock. Returns the
 * policy or NULL. Must be balanced by __cpufreq_cpu_put() with the
 * same @sysfs value.
 */
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;

	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	if (!sysfs && !kobject_get(&data->kobj))
		goto err_out_put_module;

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
a9144436
SB
223
/* Public lookup: pins both the driver module and the policy kobject. */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	if (cpufreq_disabled())
		return NULL;

	return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/* sysfs-path lookup: kobject reference is already held by the caller. */
static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}

/* Drop the references taken by __cpufreq_cpu_get() with matching @sysfs. */
static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
	if (!sysfs)
		kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}

/* Counterpart of cpufreq_cpu_get(). */
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	if (cpufreq_disabled())
		return;

	__cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

/* Counterpart of cpufreq_cpu_get_sysfs(). */
static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, true);
}
1da177e4 258
1da177e4
LT
259/*********************************************************************
260 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
261 *********************************************************************/
262
263/**
264 * adjust_jiffies - adjust the system "loops_per_jiffy"
265 *
266 * This function alters the system "loops_per_jiffy" for the clock
267 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 268 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
269 * per-CPU loops_per_jiffy value wherever possible.
270 */
#ifndef CONFIG_SMP
/* Reference loops_per_jiffy and the frequency it was captured at; set on
 * the first transition seen after boot and used for rescaling. */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* On SMP each CPU may scale differently; nothing to adjust globally. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
300
/*
 * __cpufreq_notify_transition - notify one CPU's PRECHANGE or POSTCHANGE.
 *
 * Maintains policy->transition_ongoing (one increment per PRECHANGE, one
 * decrement per POSTCHANGE), fires the SRCU transition notifier chain,
 * adjusts loops_per_jiffy and, on POSTCHANGE, records the new frequency
 * in policy->cur. Must be called with interrupts enabled.
 */
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* At most one in-flight transition per CPU in the policy:
		 * reaching cpumask_weight(policy->cpus) means a new one
		 * started in the middle of another. */
		if (WARN(policy->transition_ongoing ==
					cpumask_weight(policy->cpus),
				"In middle of another frequency transition\n"))
			return;

		policy->transition_ongoing++;

		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		if (WARN(!policy->transition_ongoing,
				"No frequency transition in progress\n"))
			return;

		policy->transition_ongoing--;

		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
bb176f7d 359
b43a7ffb
VK
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	/* Notify once per CPU governed by this policy; freqs->cpu is
	 * rewritten in place on each iteration. */
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
375
376
1da177e4
LT
377/*********************************************************************
378 * SYSFS INTERFACE *
379 *********************************************************************/
380
3bcb09a3
JF
/* Look up a registered governor by name (case-insensitive, bounded by
 * CPUFREQ_NAME_LEN); returns NULL when none matches. Callers in this
 * file hold cpufreq_governor_mutex around the call. */
static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
391
1da177e4
LT
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers, maps "performance"/"powersave" onto *@policy.
 * For target drivers, looks up the named governor — loading its module
 * on demand — and returns it via *@governor. Returns 0 on success,
 * -EINVAL when no driver is registered or the name is unknown.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/* Drop the mutex while loading the module, then
			 * retry the lookup once it had a chance to
			 * register itself. */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4 440
1da177e4 441/**
e08f5f5b
GS
442 * cpufreq_per_cpu_attr_read() / show_##file_name() -
443 * print out cpufreq information
1da177e4
LT
444 *
445 * Write out information from cpufreq_driver->policy[cpu]; object must be
446 * "unsigned int".
447 */
448
32ee8c3e
DJ
449#define show_one(file_name, object) \
450static ssize_t show_##file_name \
905d77cd 451(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 452{ \
29464f28 453 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
454}
455
456show_one(cpuinfo_min_freq, cpuinfo.min_freq);
457show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 458show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
459show_one(scaling_min_freq, min);
460show_one(scaling_max_freq, max);
461show_one(scaling_cur_freq, cur);
462
e08f5f5b
GS
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 *
 * Parses one unsigned int, applies it through __cpufreq_set_policy()
 * and records the applied value in user_policy.
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret;						\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
1da177e4
LT
492
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		/* NOTE(review): no trailing newline here, unlike the other
		 * show handlers — confirm whether that is intentional */
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}
504
1da177e4
LT
/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	/* setpolicy-style policies print a fixed name; otherwise print
	 * the attached governor's name */
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
519
1da177e4
LT
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/*
	 * Do not use cpufreq_set_policy here or the user_policy.max
	 * will be wrongly overridden
	 */
	ret = __cpufreq_set_policy(policy, &new_policy);

	/* record what was actually applied as the user's request */
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
556
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
564
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	/* drivers without a target hook expose only the two fixed policies */
	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		/* stop before another name could overflow the page */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 589
f4fd3797 590ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
591{
592 ssize_t i = 0;
593 unsigned int cpu;
594
835481d9 595 for_each_cpu(cpu, mask) {
1da177e4
LT
596 if (i)
597 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
598 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
599 if (i >= (PAGE_SIZE - 5))
29464f28 600 break;
1da177e4
LT
601 }
602 i += sprintf(&buf[i], "\n");
603 return i;
604}
f4fd3797 605EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
1da177e4 606
e8628dd0
DW
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
623
/* Forward a user-supplied frequency to the governor's store_setspeed
 * hook; -EINVAL when the governor does not provide one. */
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

/* Delegate to the governor's show_setspeed hook when present. */
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
1da177e4 649
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	/* no driver hook, or the query failed: report the cpuinfo maximum */
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
664
6dad2a29
BP
/* sysfs attribute objects wiring the show_/store_ handlers above;
 * cpuinfo_cur_freq is root-readable only (0400). */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

/* Attributes created for every policy kobject; optional ones
 * (cpuinfo_cur_freq, scaling_cur_freq, bios_limit) are added separately
 * in cpufreq_add_dev_interface(). */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
694
29464f28
DJ
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

/* sysfs read dispatcher: pin the policy (sysfs variant), take its rwsem
 * for reading, then invoke the attribute's show handler. */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
721
905d77cd
DJ
/* sysfs write dispatcher: like show() but takes the policy rwsem in
 * write mode before invoking the attribute's store handler. */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
746
/* kobject release: wake whoever is waiting in wait_for_completion() on
 * kobj_unregister once the last reference to the policy kobject drops. */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
	.release = cpufreq_sysfs_release,
};
764
2361be23
VK
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* Usage count of the global kobject's sysfs registration. */
static int cpufreq_global_kobject_usage;

/* Register the global "cpufreq" directory under the cpu subsystem root
 * on first use; subsequent calls only bump the usage count. */
int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

/* Remove the global directory again when the last user is gone. */
void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

/* Create @attr under the global kobject, holding a usage reference that
 * is dropped again if file creation fails. */
int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

/* Remove @attr and release the reference cpufreq_sysfs_create_file() took. */
void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
807
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	/* create a "cpufreq" symlink for every other CPU in the policy,
	 * taking a policy reference per link */
	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		if (j == cpu)
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		/* NOTE(review): the reference is looked up via @cpu, not
		 * @j, and its result is not NULL-checked — confirm both
		 * are intended */
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
834
cf3289d0
AC
/* Register the policy kobject under @dev and populate it: driver
 * attributes, the optional core attributes, and per-CPU symlinks.
 * On failure the kobject is put and we wait for its release before
 * returning, so the policy can be freed safely. */
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	/* optional core attributes, gated on the driver's capabilities */
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
883
/* Apply the policy's current settings as the default via
 * __cpufreq_set_policy() and record them as the user policy; on failure
 * the driver's exit hook (if any) is invoked. */
static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}
904
fcf80582
VK
#ifdef CONFIG_HOTPLUG_CPU
/* Attach @cpu to the existing policy already managing @sibling. The
 * governor is stopped around the cpumask/per-cpu bookkeeping and
 * restarted afterwards. When @frozen (light-weight init during resume),
 * sysfs links are not touched and the policy reference taken here is
 * dropped again; otherwise the reference is kept to back the symlink. */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev, bool frozen)
{
	struct cpufreq_policy *policy;
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	if (has_target)
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	if (has_target) {
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

	/* Don't touch sysfs links during light-weight init */
	if (frozen) {
		/* Drop the extra refcount that we took above */
		cpufreq_cpu_put(policy);
		return 0;
	}

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret)
		cpufreq_cpu_put(policy);

	return ret;
}
#endif
1da177e4 949
8414809c
SB
950static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
951{
952 struct cpufreq_policy *policy;
953 unsigned long flags;
954
955 write_lock_irqsave(&cpufreq_driver_lock, flags);
956
957 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
958
959 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
960
961 return policy;
962}
963
e9698cc5
SB
964static struct cpufreq_policy *cpufreq_policy_alloc(void)
965{
966 struct cpufreq_policy *policy;
967
968 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
969 if (!policy)
970 return NULL;
971
972 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
973 goto err_free_policy;
974
975 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
976 goto err_free_cpumask;
977
978 return policy;
979
980err_free_cpumask:
981 free_cpumask_var(policy->cpus);
982err_free_policy:
983 kfree(policy);
984
985 return NULL;
986}
987
988static void cpufreq_policy_free(struct cpufreq_policy *policy)
989{
990 free_cpumask_var(policy->related_cpus);
991 free_cpumask_var(policy->cpus);
992 kfree(policy);
993}
994
a82fab29
SB
995static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
996 bool frozen)
1da177e4 997{
fcf80582 998 unsigned int j, cpu = dev->id;
65922465 999 int ret = -ENOMEM;
1da177e4 1000 struct cpufreq_policy *policy;
1da177e4 1001 unsigned long flags;
90e41bac 1002#ifdef CONFIG_HOTPLUG_CPU
fcf80582 1003 struct cpufreq_governor *gov;
90e41bac
PB
1004 int sibling;
1005#endif
1da177e4 1006
c32b6b8e
AR
1007 if (cpu_is_offline(cpu))
1008 return 0;
1009
2d06d8c4 1010 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
1011
1012#ifdef CONFIG_SMP
1013 /* check whether a different CPU already registered this
1014 * CPU because it is in the same boat. */
1015 policy = cpufreq_cpu_get(cpu);
1016 if (unlikely(policy)) {
8ff69732 1017 cpufreq_cpu_put(policy);
1da177e4
LT
1018 return 0;
1019 }
fcf80582
VK
1020
1021#ifdef CONFIG_HOTPLUG_CPU
1022 /* Check if this cpu was hot-unplugged earlier and has siblings */
0d1857a1 1023 read_lock_irqsave(&cpufreq_driver_lock, flags);
fcf80582
VK
1024 for_each_online_cpu(sibling) {
1025 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
2eaa3e2d 1026 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
0d1857a1 1027 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
a82fab29
SB
1028 return cpufreq_add_policy_cpu(cpu, sibling, dev,
1029 frozen);
2eaa3e2d 1030 }
fcf80582 1031 }
0d1857a1 1032 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 1033#endif
1da177e4
LT
1034#endif
1035
1c3d85dd 1036 if (!try_module_get(cpufreq_driver->owner)) {
1da177e4
LT
1037 ret = -EINVAL;
1038 goto module_out;
1039 }
1040
8414809c
SB
1041 if (frozen)
1042 /* Restore the saved policy when doing light-weight init */
1043 policy = cpufreq_policy_restore(cpu);
1044 else
1045 policy = cpufreq_policy_alloc();
1046
059019a3 1047 if (!policy)
1da177e4 1048 goto nomem_out;
059019a3 1049
1da177e4 1050 policy->cpu = cpu;
65922465 1051 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
835481d9 1052 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 1053
5a01f2e8 1054 /* Initially set CPU itself as the policy_cpu */
f1625066 1055 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
5a01f2e8 1056
1da177e4 1057 init_completion(&policy->kobj_unregister);
65f27f38 1058 INIT_WORK(&policy->update, handle_update);
1da177e4
LT
1059
1060 /* call driver. From then on the cpufreq must be able
1061 * to accept all calls to ->verify and ->setpolicy for this CPU
1062 */
1c3d85dd 1063 ret = cpufreq_driver->init(policy);
1da177e4 1064 if (ret) {
2d06d8c4 1065 pr_debug("initialization failed\n");
2eaa3e2d 1066 goto err_set_policy_cpu;
1da177e4 1067 }
643ae6e8 1068
fcf80582
VK
1069 /* related cpus should atleast have policy->cpus */
1070 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1071
643ae6e8
VK
1072 /*
1073 * affected cpus must always be the one, which are online. We aren't
1074 * managing offline cpus here.
1075 */
1076 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1077
187d9f4e
MC
1078 policy->user_policy.min = policy->min;
1079 policy->user_policy.max = policy->max;
1da177e4 1080
a1531acd
TR
1081 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1082 CPUFREQ_START, policy);
1083
fcf80582
VK
1084#ifdef CONFIG_HOTPLUG_CPU
1085 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1086 if (gov) {
1087 policy->governor = gov;
1088 pr_debug("Restoring governor %s for cpu %d\n",
1089 policy->governor->name, cpu);
4bfa042c 1090 }
fcf80582 1091#endif
1da177e4 1092
e18f1682
SB
1093 write_lock_irqsave(&cpufreq_driver_lock, flags);
1094 for_each_cpu(j, policy->cpus) {
1095 per_cpu(cpufreq_cpu_data, j) = policy;
1096 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
1097 }
1098 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1099
a82fab29
SB
1100 if (!frozen) {
1101 ret = cpufreq_add_dev_interface(cpu, policy, dev);
1102 if (ret)
1103 goto err_out_unregister;
1104 }
8ff69732 1105
e18f1682
SB
1106 cpufreq_init_policy(policy);
1107
038c5b3e 1108 kobject_uevent(&policy->kobj, KOBJ_ADD);
1c3d85dd 1109 module_put(cpufreq_driver->owner);
2d06d8c4 1110 pr_debug("initialization complete\n");
87c32271 1111
1da177e4
LT
1112 return 0;
1113
1da177e4 1114err_out_unregister:
0d1857a1 1115 write_lock_irqsave(&cpufreq_driver_lock, flags);
e18f1682 1116 for_each_cpu(j, policy->cpus) {
7a6aedfa 1117 per_cpu(cpufreq_cpu_data, j) = NULL;
e18f1682
SB
1118 if (j != cpu)
1119 per_cpu(cpufreq_policy_cpu, j) = -1;
1120 }
0d1857a1 1121 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1122
c10997f6 1123 kobject_put(&policy->kobj);
1da177e4
LT
1124 wait_for_completion(&policy->kobj_unregister);
1125
2eaa3e2d
VK
1126err_set_policy_cpu:
1127 per_cpu(cpufreq_policy_cpu, cpu) = -1;
e9698cc5 1128 cpufreq_policy_free(policy);
1da177e4 1129nomem_out:
1c3d85dd 1130 module_put(cpufreq_driver->owner);
c32b6b8e 1131module_out:
1da177e4
LT
1132 return ret;
1133}
1134
a82fab29
SB
1135/**
1136 * cpufreq_add_dev - add a CPU device
1137 *
1138 * Adds the cpufreq interface for a CPU device.
1139 *
1140 * The Oracle says: try running cpufreq registration/unregistration concurrently
1141 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1142 * mess up, but more thorough testing is needed. - Mathieu
1143 */
1144static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1145{
1146 return __cpufreq_add_dev(dev, sif, false);
1147}
1148
b8eed8af
VK
1149static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1150{
1151 int j;
1152
1153 policy->last_cpu = policy->cpu;
1154 policy->cpu = cpu;
1155
3361b7b1 1156 for_each_cpu(j, policy->cpus)
b8eed8af 1157 per_cpu(cpufreq_policy_cpu, j) = cpu;
b8eed8af
VK
1158
1159#ifdef CONFIG_CPU_FREQ_TABLE
1160 cpufreq_frequency_table_update_policy_cpu(policy);
1161#endif
1162 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1163 CPUFREQ_UPDATE_POLICY_CPU, policy);
1164}
1da177e4 1165
f9ba680d 1166static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *data,
a82fab29 1167 unsigned int old_cpu, bool frozen)
f9ba680d
SB
1168{
1169 struct device *cpu_dev;
1170 unsigned long flags;
1171 int ret;
1172
1173 /* first sibling now owns the new sysfs dir */
1174 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
a82fab29
SB
1175
1176 /* Don't touch sysfs files during light-weight tear-down */
1177 if (frozen)
1178 return cpu_dev->id;
1179
f9ba680d
SB
1180 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1181 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1182 if (ret) {
1183 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1184
1185 WARN_ON(lock_policy_rwsem_write(old_cpu));
1186 cpumask_set_cpu(old_cpu, data->cpus);
1187
1188 write_lock_irqsave(&cpufreq_driver_lock, flags);
1189 per_cpu(cpufreq_cpu_data, old_cpu) = data;
1190 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1191
1192 unlock_policy_rwsem_write(old_cpu);
1193
1194 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1195 "cpufreq");
1196
1197 return -EINVAL;
1198 }
1199
1200 return cpu_dev->id;
1201}
1202
1da177e4 1203/**
5a01f2e8 1204 * __cpufreq_remove_dev - remove a CPU device
1da177e4
LT
1205 *
1206 * Removes the cpufreq interface for a CPU device.
5a01f2e8
VP
1207 * Caller should already have policy_rwsem in write mode for this CPU.
1208 * This routine frees the rwsem before returning.
1da177e4 1209 */
bb176f7d 1210static int __cpufreq_remove_dev(struct device *dev,
a82fab29 1211 struct subsys_interface *sif, bool frozen)
1da177e4 1212{
f9ba680d
SB
1213 unsigned int cpu = dev->id, cpus;
1214 int new_cpu;
1da177e4
LT
1215 unsigned long flags;
1216 struct cpufreq_policy *data;
499bca9b
AW
1217 struct kobject *kobj;
1218 struct completion *cmp;
1da177e4 1219
b8eed8af 1220 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1da177e4 1221
0d1857a1 1222 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 1223
7a6aedfa 1224 data = per_cpu(cpufreq_cpu_data, cpu);
2eaa3e2d
VK
1225 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1226
8414809c
SB
1227 /* Save the policy somewhere when doing a light-weight tear-down */
1228 if (frozen)
1229 per_cpu(cpufreq_cpu_data_fallback, cpu) = data;
1230
0d1857a1 1231 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1232
1233 if (!data) {
b8eed8af 1234 pr_debug("%s: No cpu_data found\n", __func__);
1da177e4
LT
1235 return -EINVAL;
1236 }
1da177e4 1237
1c3d85dd 1238 if (cpufreq_driver->target)
f6a7409c 1239 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1da177e4 1240
084f3493 1241#ifdef CONFIG_HOTPLUG_CPU
1c3d85dd 1242 if (!cpufreq_driver->setpolicy)
fa69e33f
DB
1243 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1244 data->governor->name, CPUFREQ_NAME_LEN);
1da177e4
LT
1245#endif
1246
2eaa3e2d 1247 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af 1248 cpus = cpumask_weight(data->cpus);
e4969eba
VK
1249
1250 if (cpus > 1)
1251 cpumask_clear_cpu(cpu, data->cpus);
2eaa3e2d 1252 unlock_policy_rwsem_write(cpu);
084f3493 1253
a82fab29 1254 if (cpu != data->cpu && !frozen) {
73bf0fc2
VK
1255 sysfs_remove_link(&dev->kobj, "cpufreq");
1256 } else if (cpus > 1) {
084f3493 1257
a82fab29 1258 new_cpu = cpufreq_nominate_new_policy_cpu(data, cpu, frozen);
f9ba680d 1259 if (new_cpu >= 0) {
2eaa3e2d 1260 WARN_ON(lock_policy_rwsem_write(cpu));
f9ba680d 1261 update_policy_cpu(data, new_cpu);
499bca9b 1262 unlock_policy_rwsem_write(cpu);
a82fab29
SB
1263
1264 if (!frozen) {
1265 pr_debug("%s: policy Kobject moved to cpu: %d "
1266 "from: %d\n",__func__, new_cpu, cpu);
1267 }
1da177e4
LT
1268 }
1269 }
1da177e4 1270
b8eed8af
VK
1271 /* If cpu is last user of policy, free policy */
1272 if (cpus == 1) {
2a998599
RW
1273 if (cpufreq_driver->target)
1274 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1275
8414809c
SB
1276 if (!frozen) {
1277 lock_policy_rwsem_read(cpu);
1278 kobj = &data->kobj;
1279 cmp = &data->kobj_unregister;
1280 unlock_policy_rwsem_read(cpu);
1281 kobject_put(kobj);
1282
1283 /*
1284 * We need to make sure that the underlying kobj is
1285 * actually not referenced anymore by anybody before we
1286 * proceed with unloading.
1287 */
1288 pr_debug("waiting for dropping of refcount\n");
1289 wait_for_completion(cmp);
1290 pr_debug("wait complete\n");
1291 }
7d26e2d5 1292
8414809c
SB
1293 /*
1294 * Perform the ->exit() even during light-weight tear-down,
1295 * since this is a core component, and is essential for the
1296 * subsequent light-weight ->init() to succeed.
b8eed8af 1297 */
1c3d85dd
RW
1298 if (cpufreq_driver->exit)
1299 cpufreq_driver->exit(data);
27ecddc2 1300
8414809c
SB
1301 if (!frozen)
1302 cpufreq_policy_free(data);
2a998599 1303 } else {
8414809c
SB
1304
1305 if (!frozen) {
1306 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1307 cpufreq_cpu_put(data);
1308 }
1309
2a998599
RW
1310 if (cpufreq_driver->target) {
1311 __cpufreq_governor(data, CPUFREQ_GOV_START);
1312 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1313 }
27ecddc2 1314 }
1da177e4 1315
2eaa3e2d 1316 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1da177e4
LT
1317 return 0;
1318}
1319
8a25a2fd 1320static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1321{
8a25a2fd 1322 unsigned int cpu = dev->id;
5a01f2e8 1323 int retval;
ec28297a
VP
1324
1325 if (cpu_is_offline(cpu))
1326 return 0;
1327
a82fab29 1328 retval = __cpufreq_remove_dev(dev, sif, false);
5a01f2e8
VP
1329 return retval;
1330}
1331
65f27f38 1332static void handle_update(struct work_struct *work)
1da177e4 1333{
65f27f38
DH
1334 struct cpufreq_policy *policy =
1335 container_of(work, struct cpufreq_policy, update);
1336 unsigned int cpu = policy->cpu;
2d06d8c4 1337 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1338 cpufreq_update_policy(cpu);
1339}
1340
1341/**
bb176f7d
VK
1342 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1343 * in deep trouble.
1da177e4
LT
1344 * @cpu: cpu number
1345 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1346 * @new_freq: CPU frequency the CPU actually runs at
1347 *
29464f28
DJ
1348 * We adjust to current frequency first, and need to clean up later.
1349 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1350 */
e08f5f5b
GS
1351static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1352 unsigned int new_freq)
1da177e4 1353{
b43a7ffb 1354 struct cpufreq_policy *policy;
1da177e4 1355 struct cpufreq_freqs freqs;
b43a7ffb
VK
1356 unsigned long flags;
1357
2d06d8c4 1358 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1359 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1360
1da177e4
LT
1361 freqs.old = old_freq;
1362 freqs.new = new_freq;
b43a7ffb
VK
1363
1364 read_lock_irqsave(&cpufreq_driver_lock, flags);
1365 policy = per_cpu(cpufreq_cpu_data, cpu);
1366 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1367
1368 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1369 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1da177e4
LT
1370}
1371
32ee8c3e 1372/**
4ab70df4 1373 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1374 * @cpu: CPU number
1375 *
1376 * This is the last known freq, without actually getting it from the driver.
1377 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1378 */
1379unsigned int cpufreq_quick_get(unsigned int cpu)
1380{
9e21ba8b 1381 struct cpufreq_policy *policy;
e08f5f5b 1382 unsigned int ret_freq = 0;
95235ca2 1383
1c3d85dd
RW
1384 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1385 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1386
1387 policy = cpufreq_cpu_get(cpu);
95235ca2 1388 if (policy) {
e08f5f5b 1389 ret_freq = policy->cur;
95235ca2
VP
1390 cpufreq_cpu_put(policy);
1391 }
1392
4d34a67d 1393 return ret_freq;
95235ca2
VP
1394}
1395EXPORT_SYMBOL(cpufreq_quick_get);
1396
3d737108
JB
1397/**
1398 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1399 * @cpu: CPU number
1400 *
1401 * Just return the max possible frequency for a given CPU.
1402 */
1403unsigned int cpufreq_quick_get_max(unsigned int cpu)
1404{
1405 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1406 unsigned int ret_freq = 0;
1407
1408 if (policy) {
1409 ret_freq = policy->max;
1410 cpufreq_cpu_put(policy);
1411 }
1412
1413 return ret_freq;
1414}
1415EXPORT_SYMBOL(cpufreq_quick_get_max);
1416
5a01f2e8 1417static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1418{
7a6aedfa 1419 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1420 unsigned int ret_freq = 0;
5800043b 1421
1c3d85dd 1422 if (!cpufreq_driver->get)
4d34a67d 1423 return ret_freq;
1da177e4 1424
1c3d85dd 1425 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1426
e08f5f5b 1427 if (ret_freq && policy->cur &&
1c3d85dd 1428 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e08f5f5b
GS
1429 /* verify no discrepancy between actual and
1430 saved value exists */
1431 if (unlikely(ret_freq != policy->cur)) {
1432 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1433 schedule_work(&policy->update);
1434 }
1435 }
1436
4d34a67d 1437 return ret_freq;
5a01f2e8 1438}
1da177e4 1439
5a01f2e8
VP
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
1467
8a25a2fd
KS
1468static struct subsys_interface cpufreq_interface = {
1469 .name = "cpufreq",
1470 .subsys = &cpu_subsys,
1471 .add_dev = cpufreq_add_dev,
1472 .remove_dev = cpufreq_remove_dev,
e00e56df
RW
1473};
1474
42d4dc3f 1475/**
e00e56df
RW
1476 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1477 *
1478 * This function is only executed for the boot processor. The other CPUs
1479 * have been put offline by means of CPU hotplug.
42d4dc3f 1480 */
e00e56df 1481static int cpufreq_bp_suspend(void)
42d4dc3f 1482{
e08f5f5b 1483 int ret = 0;
4bc5d341 1484
e00e56df 1485 int cpu = smp_processor_id();
42d4dc3f
BH
1486 struct cpufreq_policy *cpu_policy;
1487
2d06d8c4 1488 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1489
e00e56df 1490 /* If there's no policy for the boot CPU, we have nothing to do. */
42d4dc3f
BH
1491 cpu_policy = cpufreq_cpu_get(cpu);
1492 if (!cpu_policy)
e00e56df 1493 return 0;
42d4dc3f 1494
1c3d85dd
RW
1495 if (cpufreq_driver->suspend) {
1496 ret = cpufreq_driver->suspend(cpu_policy);
ce6c3997 1497 if (ret)
42d4dc3f
BH
1498 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1499 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1500 }
1501
42d4dc3f 1502 cpufreq_cpu_put(cpu_policy);
c9060494 1503 return ret;
42d4dc3f
BH
1504}
1505
1da177e4 1506/**
e00e56df 1507 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1508 *
1509 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1510 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1511 * restored. It will verify that the current freq is in sync with
1512 * what we believe it to be. This is a bit later than when it
1513 * should be, but nonethteless it's better than calling
1514 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1515 *
1516 * This function is only executed for the boot CPU. The other CPUs have not
1517 * been turned on yet.
1da177e4 1518 */
e00e56df 1519static void cpufreq_bp_resume(void)
1da177e4 1520{
e08f5f5b 1521 int ret = 0;
4bc5d341 1522
e00e56df 1523 int cpu = smp_processor_id();
1da177e4
LT
1524 struct cpufreq_policy *cpu_policy;
1525
2d06d8c4 1526 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1527
e00e56df 1528 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1529 cpu_policy = cpufreq_cpu_get(cpu);
1530 if (!cpu_policy)
e00e56df 1531 return;
1da177e4 1532
1c3d85dd
RW
1533 if (cpufreq_driver->resume) {
1534 ret = cpufreq_driver->resume(cpu_policy);
1da177e4
LT
1535 if (ret) {
1536 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1537 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1538 goto fail;
1da177e4
LT
1539 }
1540 }
1541
1da177e4 1542 schedule_work(&cpu_policy->update);
ce6c3997 1543
c9060494 1544fail:
1da177e4 1545 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1546}
1547
e00e56df
RW
1548static struct syscore_ops cpufreq_syscore_ops = {
1549 .suspend = cpufreq_bp_suspend,
1550 .resume = cpufreq_bp_resume,
1da177e4
LT
1551};
1552
9d95046e
BP
1553/**
1554 * cpufreq_get_current_driver - return current driver's name
1555 *
1556 * Return the name string of the currently loaded cpufreq driver
1557 * or NULL, if none.
1558 */
1559const char *cpufreq_get_current_driver(void)
1560{
1c3d85dd
RW
1561 if (cpufreq_driver)
1562 return cpufreq_driver->name;
1563
1564 return NULL;
9d95046e
BP
1565}
1566EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1567
1568/*********************************************************************
1569 * NOTIFIER LISTS INTERFACE *
1570 *********************************************************************/
1571
1572/**
1573 * cpufreq_register_notifier - register a driver with cpufreq
1574 * @nb: notifier function to register
1575 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1576 *
32ee8c3e 1577 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1578 * are notified about clock rate changes (once before and once after
1579 * the transition), or a list of drivers that are notified about
1580 * changes in cpufreq policy.
1581 *
1582 * This function may sleep, and has the same return conditions as
e041c683 1583 * blocking_notifier_chain_register.
1da177e4
LT
1584 */
1585int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1586{
1587 int ret;
1588
d5aaffa9
DB
1589 if (cpufreq_disabled())
1590 return -EINVAL;
1591
74212ca4
CEB
1592 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1593
1da177e4
LT
1594 switch (list) {
1595 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1596 ret = srcu_notifier_chain_register(
e041c683 1597 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1598 break;
1599 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1600 ret = blocking_notifier_chain_register(
1601 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1602 break;
1603 default:
1604 ret = -EINVAL;
1605 }
1da177e4
LT
1606
1607 return ret;
1608}
1609EXPORT_SYMBOL(cpufreq_register_notifier);
1610
1da177e4
LT
1611/**
1612 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1613 * @nb: notifier block to be unregistered
bb176f7d 1614 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1615 *
1616 * Remove a driver from the CPU frequency notifier list.
1617 *
1618 * This function may sleep, and has the same return conditions as
e041c683 1619 * blocking_notifier_chain_unregister.
1da177e4
LT
1620 */
1621int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1622{
1623 int ret;
1624
d5aaffa9
DB
1625 if (cpufreq_disabled())
1626 return -EINVAL;
1627
1da177e4
LT
1628 switch (list) {
1629 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1630 ret = srcu_notifier_chain_unregister(
e041c683 1631 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1632 break;
1633 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1634 ret = blocking_notifier_chain_unregister(
1635 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1636 break;
1637 default:
1638 ret = -EINVAL;
1639 }
1da177e4
LT
1640
1641 return ret;
1642}
1643EXPORT_SYMBOL(cpufreq_unregister_notifier);
1644
1645
1646/*********************************************************************
1647 * GOVERNORS *
1648 *********************************************************************/
1649
1da177e4
LT
1650int __cpufreq_driver_target(struct cpufreq_policy *policy,
1651 unsigned int target_freq,
1652 unsigned int relation)
1653{
1654 int retval = -EINVAL;
7249924e 1655 unsigned int old_target_freq = target_freq;
c32b6b8e 1656
a7b422cd
KRW
1657 if (cpufreq_disabled())
1658 return -ENODEV;
7c30ed53
VK
1659 if (policy->transition_ongoing)
1660 return -EBUSY;
a7b422cd 1661
7249924e
VK
1662 /* Make sure that target_freq is within supported range */
1663 if (target_freq > policy->max)
1664 target_freq = policy->max;
1665 if (target_freq < policy->min)
1666 target_freq = policy->min;
1667
1668 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1669 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1670
1671 if (target_freq == policy->cur)
1672 return 0;
1673
1c3d85dd
RW
1674 if (cpufreq_driver->target)
1675 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1676
1da177e4
LT
1677 return retval;
1678}
1679EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1680
1da177e4
LT
1681int cpufreq_driver_target(struct cpufreq_policy *policy,
1682 unsigned int target_freq,
1683 unsigned int relation)
1684{
f1829e4a 1685 int ret = -EINVAL;
1da177e4 1686
5a01f2e8 1687 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1688 goto fail;
1da177e4
LT
1689
1690 ret = __cpufreq_driver_target(policy, target_freq, relation);
1691
5a01f2e8 1692 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1693
f1829e4a 1694fail:
1da177e4
LT
1695 return ret;
1696}
1697EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1698
bf0b90e3 1699int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62 1700{
d5aaffa9 1701 if (cpufreq_disabled())
a262e94c 1702 return 0;
d5aaffa9 1703
1c3d85dd 1704 if (!cpufreq_driver->getavg)
0676f7f2
VK
1705 return 0;
1706
a262e94c 1707 return cpufreq_driver->getavg(policy, cpu);
dfde5d62 1708}
5a01f2e8 1709EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1710
153d7f3f 1711/*
153d7f3f
AV
1712 * when "event" is CPUFREQ_GOV_LIMITS
1713 */
1da177e4 1714
e08f5f5b
GS
1715static int __cpufreq_governor(struct cpufreq_policy *policy,
1716 unsigned int event)
1da177e4 1717{
cc993cab 1718 int ret;
6afde10c
TR
1719
1720 /* Only must be defined when default governor is known to have latency
1721 restrictions, like e.g. conservative or ondemand.
1722 That this is the case is already ensured in Kconfig
1723 */
1724#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1725 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1726#else
1727 struct cpufreq_governor *gov = NULL;
1728#endif
1c256245
TR
1729
1730 if (policy->governor->max_transition_latency &&
1731 policy->cpuinfo.transition_latency >
1732 policy->governor->max_transition_latency) {
6afde10c
TR
1733 if (!gov)
1734 return -EINVAL;
1735 else {
1736 printk(KERN_WARNING "%s governor failed, too long"
1737 " transition latency of HW, fallback"
1738 " to %s governor\n",
1739 policy->governor->name,
1740 gov->name);
1741 policy->governor = gov;
1742 }
1c256245 1743 }
1da177e4
LT
1744
1745 if (!try_module_get(policy->governor->owner))
1746 return -EINVAL;
1747
2d06d8c4 1748 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e08f5f5b 1749 policy->cpu, event);
95731ebb
XC
1750
1751 mutex_lock(&cpufreq_governor_lock);
1752 if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1753 (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1754 mutex_unlock(&cpufreq_governor_lock);
1755 return -EBUSY;
1756 }
1757
1758 if (event == CPUFREQ_GOV_STOP)
1759 policy->governor_enabled = false;
1760 else if (event == CPUFREQ_GOV_START)
1761 policy->governor_enabled = true;
1762
1763 mutex_unlock(&cpufreq_governor_lock);
1764
1da177e4
LT
1765 ret = policy->governor->governor(policy, event);
1766
4d5dcc42
VK
1767 if (!ret) {
1768 if (event == CPUFREQ_GOV_POLICY_INIT)
1769 policy->governor->initialized++;
1770 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1771 policy->governor->initialized--;
95731ebb
XC
1772 } else {
1773 /* Restore original values */
1774 mutex_lock(&cpufreq_governor_lock);
1775 if (event == CPUFREQ_GOV_STOP)
1776 policy->governor_enabled = true;
1777 else if (event == CPUFREQ_GOV_START)
1778 policy->governor_enabled = false;
1779 mutex_unlock(&cpufreq_governor_lock);
4d5dcc42 1780 }
b394058f 1781
e08f5f5b
GS
1782 /* we keep one module reference alive for
1783 each CPU governed by this CPU */
1da177e4
LT
1784 if ((event != CPUFREQ_GOV_START) || ret)
1785 module_put(policy->governor->owner);
1786 if ((event == CPUFREQ_GOV_STOP) && !ret)
1787 module_put(policy->governor->owner);
1788
1789 return ret;
1790}
1791
1da177e4
LT
1792int cpufreq_register_governor(struct cpufreq_governor *governor)
1793{
3bcb09a3 1794 int err;
1da177e4
LT
1795
1796 if (!governor)
1797 return -EINVAL;
1798
a7b422cd
KRW
1799 if (cpufreq_disabled())
1800 return -ENODEV;
1801
3fc54d37 1802 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1803
b394058f 1804 governor->initialized = 0;
3bcb09a3
JF
1805 err = -EBUSY;
1806 if (__find_governor(governor->name) == NULL) {
1807 err = 0;
1808 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1809 }
1da177e4 1810
32ee8c3e 1811 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1812 return err;
1da177e4
LT
1813}
1814EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1815
1da177e4
LT
1816void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1817{
90e41bac
PB
1818#ifdef CONFIG_HOTPLUG_CPU
1819 int cpu;
1820#endif
1821
1da177e4
LT
1822 if (!governor)
1823 return;
1824
a7b422cd
KRW
1825 if (cpufreq_disabled())
1826 return;
1827
90e41bac
PB
1828#ifdef CONFIG_HOTPLUG_CPU
1829 for_each_present_cpu(cpu) {
1830 if (cpu_online(cpu))
1831 continue;
1832 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1833 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1834 }
1835#endif
1836
3fc54d37 1837 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1838 list_del(&governor->governor_list);
3fc54d37 1839 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1840 return;
1841}
1842EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1843
1844
1da177e4
LT
1845/*********************************************************************
1846 * POLICY INTERFACE *
1847 *********************************************************************/
1848
1849/**
1850 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1851 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1852 * is written
1da177e4
LT
1853 *
1854 * Reads the current cpufreq policy.
1855 */
1856int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1857{
1858 struct cpufreq_policy *cpu_policy;
1859 if (!policy)
1860 return -EINVAL;
1861
1862 cpu_policy = cpufreq_cpu_get(cpu);
1863 if (!cpu_policy)
1864 return -EINVAL;
1865
1da177e4 1866 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1867
1868 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1869 return 0;
1870}
1871EXPORT_SYMBOL(cpufreq_get_policy);
1872
153d7f3f 1873/*
e08f5f5b
GS
1874 * data : current policy.
1875 * policy : policy to be set.
153d7f3f 1876 */
e08f5f5b
GS
1877static int __cpufreq_set_policy(struct cpufreq_policy *data,
1878 struct cpufreq_policy *policy)
1da177e4 1879{
7bd353a9 1880 int ret = 0, failed = 1;
1da177e4 1881
2d06d8c4 1882 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1da177e4
LT
1883 policy->min, policy->max);
1884
e08f5f5b
GS
1885 memcpy(&policy->cpuinfo, &data->cpuinfo,
1886 sizeof(struct cpufreq_cpuinfo));
1da177e4 1887
53391fa2 1888 if (policy->min > data->max || policy->max < data->min) {
9c9a43ed
MD
1889 ret = -EINVAL;
1890 goto error_out;
1891 }
1892
1da177e4 1893 /* verify the cpu speed can be set within this limit */
1c3d85dd 1894 ret = cpufreq_driver->verify(policy);
1da177e4
LT
1895 if (ret)
1896 goto error_out;
1897
1da177e4 1898 /* adjust if necessary - all reasons */
e041c683
AS
1899 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1900 CPUFREQ_ADJUST, policy);
1da177e4
LT
1901
1902 /* adjust if necessary - hardware incompatibility*/
e041c683
AS
1903 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1904 CPUFREQ_INCOMPATIBLE, policy);
1da177e4 1905
bb176f7d
VK
1906 /*
1907 * verify the cpu speed can be set within this limit, which might be
1908 * different to the first one
1909 */
1c3d85dd 1910 ret = cpufreq_driver->verify(policy);
e041c683 1911 if (ret)
1da177e4 1912 goto error_out;
1da177e4
LT
1913
1914 /* notification of the new policy */
e041c683
AS
1915 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1916 CPUFREQ_NOTIFY, policy);
1da177e4 1917
7d5e350f
DJ
1918 data->min = policy->min;
1919 data->max = policy->max;
1da177e4 1920
2d06d8c4 1921 pr_debug("new min and max freqs are %u - %u kHz\n",
e08f5f5b 1922 data->min, data->max);
1da177e4 1923
1c3d85dd 1924 if (cpufreq_driver->setpolicy) {
1da177e4 1925 data->policy = policy->policy;
2d06d8c4 1926 pr_debug("setting range\n");
1c3d85dd 1927 ret = cpufreq_driver->setpolicy(policy);
1da177e4
LT
1928 } else {
1929 if (policy->governor != data->governor) {
1930 /* save old, working values */
1931 struct cpufreq_governor *old_gov = data->governor;
1932
2d06d8c4 1933 pr_debug("governor switch\n");
1da177e4
LT
1934
1935 /* end old governor */
7bd353a9 1936 if (data->governor) {
1da177e4 1937 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
955ef483 1938 unlock_policy_rwsem_write(policy->cpu);
7bd353a9
VK
1939 __cpufreq_governor(data,
1940 CPUFREQ_GOV_POLICY_EXIT);
955ef483 1941 lock_policy_rwsem_write(policy->cpu);
7bd353a9 1942 }
1da177e4
LT
1943
1944 /* start new governor */
1945 data->governor = policy->governor;
7bd353a9 1946 if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
955ef483 1947 if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
7bd353a9 1948 failed = 0;
955ef483
VK
1949 } else {
1950 unlock_policy_rwsem_write(policy->cpu);
7bd353a9
VK
1951 __cpufreq_governor(data,
1952 CPUFREQ_GOV_POLICY_EXIT);
955ef483
VK
1953 lock_policy_rwsem_write(policy->cpu);
1954 }
7bd353a9
VK
1955 }
1956
1957 if (failed) {
1da177e4 1958 /* new governor failed, so re-start old one */
2d06d8c4 1959 pr_debug("starting governor %s failed\n",
e08f5f5b 1960 data->governor->name);
1da177e4
LT
1961 if (old_gov) {
1962 data->governor = old_gov;
7bd353a9
VK
1963 __cpufreq_governor(data,
1964 CPUFREQ_GOV_POLICY_INIT);
e08f5f5b
GS
1965 __cpufreq_governor(data,
1966 CPUFREQ_GOV_START);
1da177e4
LT
1967 }
1968 ret = -EINVAL;
1969 goto error_out;
1970 }
1971 /* might be a policy change, too, so fall through */
1972 }
2d06d8c4 1973 pr_debug("governor: change or update limits\n");
1da177e4
LT
1974 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1975 }
1976
7d5e350f 1977error_out:
1da177e4
LT
1978 return ret;
1979}
1980
1da177e4
LT
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	/* Takes a reference on the policy; dropped at 'fail'/'no_policy'. */
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	/* Serialize against concurrent policy updates for this CPU. */
	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	/*
	 * Start from a copy of the live policy, then re-apply the
	 * user-requested limits/governor so the re-evaluation starts
	 * from what userspace asked for, not the clamped values.
	 */
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			/* Resync bookkeeping if hardware drifted from us. */
			if (data->cur != policy.cur && cpufreq_driver->target)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
2037
2760984f 2038static int cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
2039 unsigned long action, void *hcpu)
2040{
2041 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 2042 struct device *dev;
5302c3fb 2043 bool frozen = false;
c32b6b8e 2044
8a25a2fd
KS
2045 dev = get_cpu_device(cpu);
2046 if (dev) {
5302c3fb
SB
2047
2048 if (action & CPU_TASKS_FROZEN)
2049 frozen = true;
2050
2051 switch (action & ~CPU_TASKS_FROZEN) {
c32b6b8e 2052 case CPU_ONLINE:
5302c3fb 2053 __cpufreq_add_dev(dev, NULL, frozen);
23d32899 2054 cpufreq_update_policy(cpu);
c32b6b8e 2055 break;
5302c3fb 2056
c32b6b8e 2057 case CPU_DOWN_PREPARE:
5302c3fb 2058 __cpufreq_remove_dev(dev, NULL, frozen);
c32b6b8e 2059 break;
5302c3fb 2060
5a01f2e8 2061 case CPU_DOWN_FAILED:
5302c3fb 2062 __cpufreq_add_dev(dev, NULL, frozen);
c32b6b8e
AR
2063 break;
2064 }
2065 }
2066 return NOTIFY_OK;
2067}
2068
/* Hotplug notifier; __refdata because the callback is in .ref sections. */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
2072
2073/*********************************************************************
2074 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2075 *********************************************************************/
2076
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values#
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* A driver must verify limits and init CPUs, and must implement
	 * either ->setpolicy or ->target. */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* setpolicy drivers manage frequency themselves; loops_per_jiffy
	 * scaling does not apply. */
	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* Only one driver may be registered at a time. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* Triggers ->init() for every present CPU via the subsys interface. */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	/* Roll back so a subsequent register attempt can succeed. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2148
1da177e4
LT
2149/**
2150 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2151 *
bb176f7d 2152 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
2153 * the right to do so, i.e. if you have succeeded in initialising before!
2154 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2155 * currently not initialised.
2156 */
221dee28 2157int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
2158{
2159 unsigned long flags;
2160
1c3d85dd 2161 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 2162 return -EINVAL;
1da177e4 2163
2d06d8c4 2164 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 2165
8a25a2fd 2166 subsys_interface_unregister(&cpufreq_interface);
65edc68c 2167 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 2168
0d1857a1 2169 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2170 cpufreq_driver = NULL;
0d1857a1 2171 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
2172
2173 return 0;
2174}
2175EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2176
/* Core bring-up: per-CPU locking state, global sysfs kobject, and the
 * syscore (suspend/resume) hooks. Runs at core_initcall time so it is
 * ready before any cpufreq driver registers. */
static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	for_each_possible_cpu(cpu) {
		/* -1 = CPU not yet managed by any policy. */
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	cpufreq_global_kobject = kobject_create();
	/* Allocation failure this early is unrecoverable. */
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);
This page took 0.754141 seconds and 5 git commands to generate.