cpufreq: remove unnecessary check in __cpufreq_governor()
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
bb176f7d 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
1da177e4 7 *
c32b6b8e 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 9 * Added handling for CPU hotplug
8ff69732
DJ
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 12 *
1da177e4
LT
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
1da177e4
LT
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
5ff0a268 20#include <linux/cpu.h>
1da177e4
LT
21#include <linux/cpufreq.h>
22#include <linux/delay.h>
1da177e4 23#include <linux/device.h>
5ff0a268
VK
24#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
3fc54d37 27#include <linux/mutex.h>
5ff0a268 28#include <linux/slab.h>
e00e56df 29#include <linux/syscore_ops.h>
5ff0a268 30#include <linux/tick.h>
6f4f2723
TR
31#include <trace/events/power.h>
32
1da177e4 33/**
cd878479 34 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
35 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
37 */
1c3d85dd 38static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 39static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
8414809c 40static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
bb176f7d
VK
41static DEFINE_RWLOCK(cpufreq_driver_lock);
42static DEFINE_MUTEX(cpufreq_governor_lock);
c88a1f8b 43static LIST_HEAD(cpufreq_policy_list);
bb176f7d 44
084f3493
TR
45#ifdef CONFIG_HOTPLUG_CPU
46/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 47static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 48#endif
1da177e4 49
5a01f2e8
VP
50/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
5a01f2e8
VP
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 66 */
f1625066 67static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
68static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69
70#define lock_policy_rwsem(mode, cpu) \
fa1d8af4 71static int lock_policy_rwsem_##mode(int cpu) \
5a01f2e8 72{ \
f1625066 73 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
5a01f2e8
VP
74 BUG_ON(policy_cpu == -1); \
75 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8
VP
76 \
77 return 0; \
78}
79
80lock_policy_rwsem(read, cpu);
5a01f2e8 81lock_policy_rwsem(write, cpu);
5a01f2e8 82
fa1d8af4
VK
83#define unlock_policy_rwsem(mode, cpu) \
84static void unlock_policy_rwsem_##mode(int cpu) \
85{ \
86 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
87 BUG_ON(policy_cpu == -1); \
88 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8 89}
5a01f2e8 90
fa1d8af4
VK
91unlock_policy_rwsem(read, cpu);
92unlock_policy_rwsem(write, cpu);
5a01f2e8 93
6eed9404
VK
94/*
95 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
96 * sections
97 */
98static DECLARE_RWSEM(cpufreq_rwsem);
99
1da177e4 100/* internal prototypes */
29464f28
DJ
101static int __cpufreq_governor(struct cpufreq_policy *policy,
102 unsigned int event);
5a01f2e8 103static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 104static void handle_update(struct work_struct *work);
1da177e4
LT
105
106/**
32ee8c3e
DJ
107 * Two notifier lists: the "policy" list is involved in the
108 * validation process for a new CPU frequency policy; the
1da177e4
LT
109 * "transition" list for kernel code that needs to handle
110 * changes to devices when the CPU clock speed changes.
111 * The mutex locks both lists.
112 */
e041c683 113static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 114static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 115
74212ca4 116static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
117static int __init init_cpufreq_transition_notifier_list(void)
118{
119 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 120 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
121 return 0;
122}
b3438f82 123pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 124
a7b422cd 125static int off __read_mostly;
da584455 126static int cpufreq_disabled(void)
a7b422cd
KRW
127{
128 return off;
129}
130void disable_cpufreq(void)
131{
132 off = 1;
133}
1da177e4 134static LIST_HEAD(cpufreq_governor_list);
29464f28 135static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 136
4d5dcc42
VK
137bool have_governor_per_policy(void)
138{
1c3d85dd 139 return cpufreq_driver->have_governor_per_policy;
4d5dcc42 140}
3f869d6d 141EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 142
944e9a03
VK
143struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
144{
145 if (have_governor_per_policy())
146 return &policy->kobj;
147 else
148 return cpufreq_global_kobject;
149}
150EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
151
72a4ce34
VK
152static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
153{
154 u64 idle_time;
155 u64 cur_wall_time;
156 u64 busy_time;
157
158 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
159
160 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
161 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
162 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
163 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
164 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
165 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
166
167 idle_time = cur_wall_time - busy_time;
168 if (wall)
169 *wall = cputime_to_usecs(cur_wall_time);
170
171 return cputime_to_usecs(idle_time);
172}
173
174u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
175{
176 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
177
178 if (idle_time == -1ULL)
179 return get_cpu_idle_time_jiffy(cpu, wall);
180 else if (!io_busy)
181 idle_time += get_cpu_iowait_time_us(cpu, wall);
182
183 return idle_time;
184}
185EXPORT_SYMBOL_GPL(get_cpu_idle_time);
186
6eed9404 187struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
1da177e4 188{
6eed9404 189 struct cpufreq_policy *policy = NULL;
1da177e4
LT
190 unsigned long flags;
191
6eed9404
VK
192 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
193 return NULL;
194
195 if (!down_read_trylock(&cpufreq_rwsem))
196 return NULL;
1da177e4
LT
197
198 /* get the cpufreq driver */
1c3d85dd 199 read_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4 200
6eed9404
VK
201 if (cpufreq_driver) {
202 /* get the CPU */
203 policy = per_cpu(cpufreq_cpu_data, cpu);
204 if (policy)
205 kobject_get(&policy->kobj);
206 }
1da177e4 207
6eed9404 208 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 209
3a3e9e06 210 if (!policy)
6eed9404 211 up_read(&cpufreq_rwsem);
1da177e4 212
3a3e9e06 213 return policy;
a9144436 214}
1da177e4
LT
215EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
216
3a3e9e06 217void cpufreq_cpu_put(struct cpufreq_policy *policy)
1da177e4 218{
d5aaffa9
DB
219 if (cpufreq_disabled())
220 return;
221
6eed9404
VK
222 kobject_put(&policy->kobj);
223 up_read(&cpufreq_rwsem);
1da177e4
LT
224}
225EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
226
1da177e4
LT
227/*********************************************************************
228 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
229 *********************************************************************/
230
231/**
232 * adjust_jiffies - adjust the system "loops_per_jiffy"
233 *
234 * This function alters the system "loops_per_jiffy" for the clock
235 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 236 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
237 * per-CPU loops_per_jiffy value wherever possible.
238 */
239#ifndef CONFIG_SMP
240static unsigned long l_p_j_ref;
bb176f7d 241static unsigned int l_p_j_ref_freq;
1da177e4 242
858119e1 243static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4
LT
244{
245 if (ci->flags & CPUFREQ_CONST_LOOPS)
246 return;
247
248 if (!l_p_j_ref_freq) {
249 l_p_j_ref = loops_per_jiffy;
250 l_p_j_ref_freq = ci->old;
2d06d8c4 251 pr_debug("saving %lu as reference value for loops_per_jiffy; "
e08f5f5b 252 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
1da177e4 253 }
bb176f7d 254 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
42d4dc3f 255 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
e08f5f5b
GS
256 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
257 ci->new);
2d06d8c4 258 pr_debug("scaling loops_per_jiffy to %lu "
e08f5f5b 259 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
1da177e4
LT
260 }
261}
262#else
e08f5f5b
GS
/* On SMP loops_per_jiffy is per-CPU; there is nothing to adjust here. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
}
1da177e4
LT
267#endif
268
0956df9c 269static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
b43a7ffb 270 struct cpufreq_freqs *freqs, unsigned int state)
1da177e4
LT
271{
272 BUG_ON(irqs_disabled());
273
d5aaffa9
DB
274 if (cpufreq_disabled())
275 return;
276
1c3d85dd 277 freqs->flags = cpufreq_driver->flags;
2d06d8c4 278 pr_debug("notification %u of frequency transition to %u kHz\n",
e4472cb3 279 state, freqs->new);
1da177e4 280
1da177e4 281 switch (state) {
e4472cb3 282
1da177e4 283 case CPUFREQ_PRECHANGE:
266c13d7
VK
284 if (WARN(policy->transition_ongoing ==
285 cpumask_weight(policy->cpus),
7c30ed53
VK
286 "In middle of another frequency transition\n"))
287 return;
288
266c13d7 289 policy->transition_ongoing++;
7c30ed53 290
32ee8c3e 291 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
292 * which is not equal to what the cpufreq core thinks is
293 * "old frequency".
1da177e4 294 */
1c3d85dd 295 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
296 if ((policy) && (policy->cpu == freqs->cpu) &&
297 (policy->cur) && (policy->cur != freqs->old)) {
2d06d8c4 298 pr_debug("Warning: CPU frequency is"
e4472cb3
DJ
299 " %u, cpufreq assumed %u kHz.\n",
300 freqs->old, policy->cur);
301 freqs->old = policy->cur;
1da177e4
LT
302 }
303 }
b4dfdbb3 304 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 305 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
306 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
307 break;
e4472cb3 308
1da177e4 309 case CPUFREQ_POSTCHANGE:
7c30ed53
VK
310 if (WARN(!policy->transition_ongoing,
311 "No frequency transition in progress\n"))
312 return;
313
266c13d7 314 policy->transition_ongoing--;
7c30ed53 315
1da177e4 316 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
2d06d8c4 317 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
6f4f2723 318 (unsigned long)freqs->cpu);
25e41933 319 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 320 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 321 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
322 if (likely(policy) && likely(policy->cpu == freqs->cpu))
323 policy->cur = freqs->new;
1da177e4
LT
324 break;
325 }
1da177e4 326}
bb176f7d 327
b43a7ffb
VK
328/**
329 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
330 * on frequency transition.
331 *
332 * This function calls the transition notifiers and the "adjust_jiffies"
333 * function. It is called twice on all CPU frequency changes that have
334 * external effects.
335 */
336void cpufreq_notify_transition(struct cpufreq_policy *policy,
337 struct cpufreq_freqs *freqs, unsigned int state)
338{
339 for_each_cpu(freqs->cpu, policy->cpus)
340 __cpufreq_notify_transition(policy, freqs, state);
341}
1da177e4
LT
342EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
343
344
1da177e4
LT
345/*********************************************************************
346 * SYSFS INTERFACE *
347 *********************************************************************/
348
3bcb09a3
JF
349static struct cpufreq_governor *__find_governor(const char *str_governor)
350{
351 struct cpufreq_governor *t;
352
353 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 354 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
355 return t;
356
357 return NULL;
358}
359
1da177e4
LT
360/**
361 * cpufreq_parse_governor - parse a governor string
362 */
905d77cd 363static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
364 struct cpufreq_governor **governor)
365{
3bcb09a3 366 int err = -EINVAL;
1c3d85dd
RW
367
368 if (!cpufreq_driver)
3bcb09a3
JF
369 goto out;
370
1c3d85dd 371 if (cpufreq_driver->setpolicy) {
1da177e4
LT
372 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
373 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 374 err = 0;
e08f5f5b
GS
375 } else if (!strnicmp(str_governor, "powersave",
376 CPUFREQ_NAME_LEN)) {
1da177e4 377 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 378 err = 0;
1da177e4 379 }
1c3d85dd 380 } else if (cpufreq_driver->target) {
1da177e4 381 struct cpufreq_governor *t;
3bcb09a3 382
3fc54d37 383 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
384
385 t = __find_governor(str_governor);
386
ea714970 387 if (t == NULL) {
1a8e1463 388 int ret;
ea714970 389
1a8e1463
KC
390 mutex_unlock(&cpufreq_governor_mutex);
391 ret = request_module("cpufreq_%s", str_governor);
392 mutex_lock(&cpufreq_governor_mutex);
ea714970 393
1a8e1463
KC
394 if (ret == 0)
395 t = __find_governor(str_governor);
ea714970
JF
396 }
397
3bcb09a3
JF
398 if (t != NULL) {
399 *governor = t;
400 err = 0;
1da177e4 401 }
3bcb09a3 402
3fc54d37 403 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 404 }
29464f28 405out:
3bcb09a3 406 return err;
1da177e4 407}
1da177e4 408
1da177e4 409/**
e08f5f5b
GS
410 * cpufreq_per_cpu_attr_read() / show_##file_name() -
411 * print out cpufreq information
1da177e4
LT
412 *
413 * Write out information from cpufreq_driver->policy[cpu]; object must be
414 * "unsigned int".
415 */
416
32ee8c3e
DJ
/* Generate a sysfs show() helper printing one unsigned int policy field. */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}
423
424show_one(cpuinfo_min_freq, cpuinfo.min_freq);
425show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 426show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
427show_one(scaling_min_freq, min);
428show_one(scaling_max_freq, max);
429show_one(scaling_cur_freq, cur);
430
3a3e9e06
VK
431static int __cpufreq_set_policy(struct cpufreq_policy *policy,
432 struct cpufreq_policy *new_policy);
7970e08b 433
1da177e4
LT
434/**
435 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
436 */
/*
 * Generate a sysfs store() helper: parse one unsigned int, apply it via
 * __cpufreq_set_policy() and mirror the result into user_policy.
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret;						\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}
457
29464f28
DJ
458store_one(scaling_min_freq, min);
459store_one(scaling_max_freq, max);
1da177e4
LT
460
461/**
462 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
463 */
905d77cd
DJ
464static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
465 char *buf)
1da177e4 466{
5a01f2e8 467 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
468 if (!cur_freq)
469 return sprintf(buf, "<unknown>");
470 return sprintf(buf, "%u\n", cur_freq);
471}
472
1da177e4
LT
473/**
474 * show_scaling_governor - show the current policy for the specified CPU
475 */
905d77cd 476static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 477{
29464f28 478 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
479 return sprintf(buf, "powersave\n");
480 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
481 return sprintf(buf, "performance\n");
482 else if (policy->governor)
4b972f0b 483 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 484 policy->governor->name);
1da177e4
LT
485 return -EINVAL;
486}
487
1da177e4
LT
488/**
489 * store_scaling_governor - store policy for the specified CPU
490 */
905d77cd
DJ
491static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
492 const char *buf, size_t count)
1da177e4 493{
f55c9c26 494 unsigned int ret;
1da177e4
LT
495 char str_governor[16];
496 struct cpufreq_policy new_policy;
497
498 ret = cpufreq_get_policy(&new_policy, policy->cpu);
499 if (ret)
500 return ret;
501
29464f28 502 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
503 if (ret != 1)
504 return -EINVAL;
505
e08f5f5b
GS
506 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
507 &new_policy.governor))
1da177e4
LT
508 return -EINVAL;
509
bb176f7d
VK
510 /*
511 * Do not use cpufreq_set_policy here or the user_policy.max
512 * will be wrongly overridden
513 */
7970e08b
TR
514 ret = __cpufreq_set_policy(policy, &new_policy);
515
516 policy->user_policy.policy = policy->policy;
517 policy->user_policy.governor = policy->governor;
7970e08b 518
e08f5f5b
GS
519 if (ret)
520 return ret;
521 else
522 return count;
1da177e4
LT
523}
524
525/**
526 * show_scaling_driver - show the cpufreq driver currently loaded
527 */
905d77cd 528static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 529{
1c3d85dd 530 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
1da177e4
LT
531}
532
533/**
534 * show_scaling_available_governors - show the available CPUfreq governors
535 */
905d77cd
DJ
536static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
537 char *buf)
1da177e4
LT
538{
539 ssize_t i = 0;
540 struct cpufreq_governor *t;
541
1c3d85dd 542 if (!cpufreq_driver->target) {
1da177e4
LT
543 i += sprintf(buf, "performance powersave");
544 goto out;
545 }
546
547 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
548 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
549 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 550 goto out;
4b972f0b 551 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 552 }
7d5e350f 553out:
1da177e4
LT
554 i += sprintf(&buf[i], "\n");
555 return i;
556}
e8628dd0 557
f4fd3797 558ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
559{
560 ssize_t i = 0;
561 unsigned int cpu;
562
835481d9 563 for_each_cpu(cpu, mask) {
1da177e4
LT
564 if (i)
565 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
566 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
567 if (i >= (PAGE_SIZE - 5))
29464f28 568 break;
1da177e4
LT
569 }
570 i += sprintf(&buf[i], "\n");
571 return i;
572}
f4fd3797 573EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
1da177e4 574
e8628dd0
DW
575/**
576 * show_related_cpus - show the CPUs affected by each transition even if
577 * hw coordination is in use
578 */
579static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
580{
f4fd3797 581 return cpufreq_show_cpus(policy->related_cpus, buf);
e8628dd0
DW
582}
583
584/**
585 * show_affected_cpus - show the CPUs affected by each transition
586 */
587static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
588{
f4fd3797 589 return cpufreq_show_cpus(policy->cpus, buf);
e8628dd0
DW
590}
591
9e76988e 592static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 593 const char *buf, size_t count)
9e76988e
VP
594{
595 unsigned int freq = 0;
596 unsigned int ret;
597
879000f9 598 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
599 return -EINVAL;
600
601 ret = sscanf(buf, "%u", &freq);
602 if (ret != 1)
603 return -EINVAL;
604
605 policy->governor->store_setspeed(policy, freq);
606
607 return count;
608}
609
610static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
611{
879000f9 612 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
613 return sprintf(buf, "<unsupported>\n");
614
615 return policy->governor->show_setspeed(policy, buf);
616}
1da177e4 617
e2f74f35 618/**
8bf1ac72 619 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
620 */
621static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
622{
623 unsigned int limit;
624 int ret;
1c3d85dd
RW
625 if (cpufreq_driver->bios_limit) {
626 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
e2f74f35
TR
627 if (!ret)
628 return sprintf(buf, "%u\n", limit);
629 }
630 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
631}
632
6dad2a29
BP
633cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
634cpufreq_freq_attr_ro(cpuinfo_min_freq);
635cpufreq_freq_attr_ro(cpuinfo_max_freq);
636cpufreq_freq_attr_ro(cpuinfo_transition_latency);
637cpufreq_freq_attr_ro(scaling_available_governors);
638cpufreq_freq_attr_ro(scaling_driver);
639cpufreq_freq_attr_ro(scaling_cur_freq);
640cpufreq_freq_attr_ro(bios_limit);
641cpufreq_freq_attr_ro(related_cpus);
642cpufreq_freq_attr_ro(affected_cpus);
643cpufreq_freq_attr_rw(scaling_min_freq);
644cpufreq_freq_attr_rw(scaling_max_freq);
645cpufreq_freq_attr_rw(scaling_governor);
646cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 647
905d77cd 648static struct attribute *default_attrs[] = {
1da177e4
LT
649 &cpuinfo_min_freq.attr,
650 &cpuinfo_max_freq.attr,
ed129784 651 &cpuinfo_transition_latency.attr,
1da177e4
LT
652 &scaling_min_freq.attr,
653 &scaling_max_freq.attr,
654 &affected_cpus.attr,
e8628dd0 655 &related_cpus.attr,
1da177e4
LT
656 &scaling_governor.attr,
657 &scaling_driver.attr,
658 &scaling_available_governors.attr,
9e76988e 659 &scaling_setspeed.attr,
1da177e4
LT
660 NULL
661};
662
29464f28
DJ
663#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
664#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 665
29464f28 666static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 667{
905d77cd
DJ
668 struct cpufreq_policy *policy = to_policy(kobj);
669 struct freq_attr *fattr = to_attr(attr);
0db4a8a9 670 ssize_t ret = -EINVAL;
6eed9404
VK
671
672 if (!down_read_trylock(&cpufreq_rwsem))
673 goto exit;
5a01f2e8
VP
674
675 if (lock_policy_rwsem_read(policy->cpu) < 0)
6eed9404 676 goto up_read;
5a01f2e8 677
e08f5f5b
GS
678 if (fattr->show)
679 ret = fattr->show(policy, buf);
680 else
681 ret = -EIO;
682
5a01f2e8 683 unlock_policy_rwsem_read(policy->cpu);
6eed9404
VK
684
685up_read:
686 up_read(&cpufreq_rwsem);
687exit:
1da177e4
LT
688 return ret;
689}
690
905d77cd
DJ
691static ssize_t store(struct kobject *kobj, struct attribute *attr,
692 const char *buf, size_t count)
1da177e4 693{
905d77cd
DJ
694 struct cpufreq_policy *policy = to_policy(kobj);
695 struct freq_attr *fattr = to_attr(attr);
a07530b4 696 ssize_t ret = -EINVAL;
6eed9404
VK
697
698 if (!down_read_trylock(&cpufreq_rwsem))
699 goto exit;
5a01f2e8
VP
700
701 if (lock_policy_rwsem_write(policy->cpu) < 0)
6eed9404 702 goto up_read;
5a01f2e8 703
e08f5f5b
GS
704 if (fattr->store)
705 ret = fattr->store(policy, buf, count);
706 else
707 ret = -EIO;
708
5a01f2e8 709 unlock_policy_rwsem_write(policy->cpu);
6eed9404
VK
710
711up_read:
712 up_read(&cpufreq_rwsem);
713exit:
1da177e4
LT
714 return ret;
715}
716
905d77cd 717static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 718{
905d77cd 719 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 720 pr_debug("last reference is dropped\n");
1da177e4
LT
721 complete(&policy->kobj_unregister);
722}
723
52cf25d0 724static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
725 .show = show,
726 .store = store,
727};
728
729static struct kobj_type ktype_cpufreq = {
730 .sysfs_ops = &sysfs_ops,
731 .default_attrs = default_attrs,
732 .release = cpufreq_sysfs_release,
733};
734
2361be23
VK
735struct kobject *cpufreq_global_kobject;
736EXPORT_SYMBOL(cpufreq_global_kobject);
737
738static int cpufreq_global_kobject_usage;
739
740int cpufreq_get_global_kobject(void)
741{
742 if (!cpufreq_global_kobject_usage++)
743 return kobject_add(cpufreq_global_kobject,
744 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
745
746 return 0;
747}
748EXPORT_SYMBOL(cpufreq_get_global_kobject);
749
750void cpufreq_put_global_kobject(void)
751{
752 if (!--cpufreq_global_kobject_usage)
753 kobject_del(cpufreq_global_kobject);
754}
755EXPORT_SYMBOL(cpufreq_put_global_kobject);
756
757int cpufreq_sysfs_create_file(const struct attribute *attr)
758{
759 int ret = cpufreq_get_global_kobject();
760
761 if (!ret) {
762 ret = sysfs_create_file(cpufreq_global_kobject, attr);
763 if (ret)
764 cpufreq_put_global_kobject();
765 }
766
767 return ret;
768}
769EXPORT_SYMBOL(cpufreq_sysfs_create_file);
770
771void cpufreq_sysfs_remove_file(const struct attribute *attr)
772{
773 sysfs_remove_file(cpufreq_global_kobject, attr);
774 cpufreq_put_global_kobject();
775}
776EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
777
19d6f7ec 778/* symlink affected CPUs */
308b60e7 779static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
19d6f7ec
DJ
780{
781 unsigned int j;
782 int ret = 0;
783
784 for_each_cpu(j, policy->cpus) {
8a25a2fd 785 struct device *cpu_dev;
19d6f7ec 786
308b60e7 787 if (j == policy->cpu)
19d6f7ec 788 continue;
19d6f7ec 789
e8fdde10 790 pr_debug("Adding link for CPU: %u\n", j);
8a25a2fd
KS
791 cpu_dev = get_cpu_device(j);
792 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
19d6f7ec 793 "cpufreq");
71c3461e
RW
794 if (ret)
795 break;
19d6f7ec
DJ
796 }
797 return ret;
798}
799
308b60e7 800static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
8a25a2fd 801 struct device *dev)
909a694e
DJ
802{
803 struct freq_attr **drv_attr;
909a694e 804 int ret = 0;
909a694e
DJ
805
806 /* prepare interface data */
807 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
8a25a2fd 808 &dev->kobj, "cpufreq");
909a694e
DJ
809 if (ret)
810 return ret;
811
812 /* set up files for this cpu device */
1c3d85dd 813 drv_attr = cpufreq_driver->attr;
909a694e
DJ
814 while ((drv_attr) && (*drv_attr)) {
815 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
816 if (ret)
1c3d85dd 817 goto err_out_kobj_put;
909a694e
DJ
818 drv_attr++;
819 }
1c3d85dd 820 if (cpufreq_driver->get) {
909a694e
DJ
821 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
822 if (ret)
1c3d85dd 823 goto err_out_kobj_put;
909a694e 824 }
1c3d85dd 825 if (cpufreq_driver->target) {
909a694e
DJ
826 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
827 if (ret)
1c3d85dd 828 goto err_out_kobj_put;
909a694e 829 }
1c3d85dd 830 if (cpufreq_driver->bios_limit) {
e2f74f35
TR
831 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
832 if (ret)
1c3d85dd 833 goto err_out_kobj_put;
e2f74f35 834 }
909a694e 835
308b60e7 836 ret = cpufreq_add_dev_symlink(policy);
ecf7e461
DJ
837 if (ret)
838 goto err_out_kobj_put;
839
e18f1682
SB
840 return ret;
841
842err_out_kobj_put:
843 kobject_put(&policy->kobj);
844 wait_for_completion(&policy->kobj_unregister);
845 return ret;
846}
847
848static void cpufreq_init_policy(struct cpufreq_policy *policy)
849{
850 struct cpufreq_policy new_policy;
851 int ret = 0;
852
d5b73cd8 853 memcpy(&new_policy, policy, sizeof(*policy));
ecf7e461
DJ
854 /* assure that the starting sequence is run in __cpufreq_set_policy */
855 policy->governor = NULL;
856
857 /* set default policy */
858 ret = __cpufreq_set_policy(policy, &new_policy);
859 policy->user_policy.policy = policy->policy;
860 policy->user_policy.governor = policy->governor;
861
862 if (ret) {
2d06d8c4 863 pr_debug("setting policy failed\n");
1c3d85dd
RW
864 if (cpufreq_driver->exit)
865 cpufreq_driver->exit(policy);
ecf7e461 866 }
909a694e
DJ
867}
868
fcf80582 869#ifdef CONFIG_HOTPLUG_CPU
d8d3b471
VK
870static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
871 unsigned int cpu, struct device *dev,
872 bool frozen)
fcf80582 873{
1c3d85dd 874 int ret = 0, has_target = !!cpufreq_driver->target;
fcf80582
VK
875 unsigned long flags;
876
3de9bdeb
VK
877 if (has_target) {
878 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
879 if (ret) {
880 pr_err("%s: Failed to stop governor\n", __func__);
881 return ret;
882 }
883 }
fcf80582 884
d8d3b471 885 lock_policy_rwsem_write(policy->cpu);
2eaa3e2d 886
0d1857a1 887 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 888
fcf80582 889 cpumask_set_cpu(cpu, policy->cpus);
2eaa3e2d 890 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
fcf80582 891 per_cpu(cpufreq_cpu_data, cpu) = policy;
0d1857a1 892 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 893
d8d3b471 894 unlock_policy_rwsem_write(policy->cpu);
2eaa3e2d 895
820c6ca2 896 if (has_target) {
3de9bdeb
VK
897 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
898 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
899 pr_err("%s: Failed to start governor\n", __func__);
900 return ret;
901 }
820c6ca2 902 }
fcf80582 903
a82fab29 904 /* Don't touch sysfs links during light-weight init */
71c3461e
RW
905 if (!frozen)
906 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
a82fab29
SB
907
908 return ret;
fcf80582
VK
909}
910#endif
1da177e4 911
8414809c
SB
912static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
913{
914 struct cpufreq_policy *policy;
915 unsigned long flags;
916
917 write_lock_irqsave(&cpufreq_driver_lock, flags);
918
919 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
920
921 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
922
923 return policy;
924}
925
e9698cc5
SB
926static struct cpufreq_policy *cpufreq_policy_alloc(void)
927{
928 struct cpufreq_policy *policy;
929
930 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
931 if (!policy)
932 return NULL;
933
934 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
935 goto err_free_policy;
936
937 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
938 goto err_free_cpumask;
939
c88a1f8b 940 INIT_LIST_HEAD(&policy->policy_list);
e9698cc5
SB
941 return policy;
942
943err_free_cpumask:
944 free_cpumask_var(policy->cpus);
945err_free_policy:
946 kfree(policy);
947
948 return NULL;
949}
950
951static void cpufreq_policy_free(struct cpufreq_policy *policy)
952{
953 free_cpumask_var(policy->related_cpus);
954 free_cpumask_var(policy->cpus);
955 kfree(policy);
956}
957
a82fab29
SB
958static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
959 bool frozen)
1da177e4 960{
fcf80582 961 unsigned int j, cpu = dev->id;
65922465 962 int ret = -ENOMEM;
1da177e4 963 struct cpufreq_policy *policy;
1da177e4 964 unsigned long flags;
90e41bac 965#ifdef CONFIG_HOTPLUG_CPU
fcf80582 966 struct cpufreq_governor *gov;
878f6e07 967 int sibling;
90e41bac 968#endif
1da177e4 969
c32b6b8e
AR
970 if (cpu_is_offline(cpu))
971 return 0;
972
2d06d8c4 973 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
974
975#ifdef CONFIG_SMP
976 /* check whether a different CPU already registered this
977 * CPU because it is in the same boat. */
978 policy = cpufreq_cpu_get(cpu);
979 if (unlikely(policy)) {
8ff69732 980 cpufreq_cpu_put(policy);
1da177e4
LT
981 return 0;
982 }
fcf80582 983
6eed9404
VK
984 if (!down_read_trylock(&cpufreq_rwsem))
985 return 0;
986
fcf80582
VK
987#ifdef CONFIG_HOTPLUG_CPU
988 /* Check if this cpu was hot-unplugged earlier and has siblings */
0d1857a1 989 read_lock_irqsave(&cpufreq_driver_lock, flags);
878f6e07
RW
990 for_each_online_cpu(sibling) {
991 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
992 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
0d1857a1 993 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
878f6e07 994 ret = cpufreq_add_policy_cpu(cp, cpu, dev, frozen);
6eed9404
VK
995 up_read(&cpufreq_rwsem);
996 return ret;
2eaa3e2d 997 }
fcf80582 998 }
0d1857a1 999 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 1000#endif
1da177e4
LT
1001#endif
1002
8414809c
SB
1003 if (frozen)
1004 /* Restore the saved policy when doing light-weight init */
1005 policy = cpufreq_policy_restore(cpu);
1006 else
1007 policy = cpufreq_policy_alloc();
1008
059019a3 1009 if (!policy)
1da177e4 1010 goto nomem_out;
059019a3 1011
1da177e4 1012 policy->cpu = cpu;
65922465 1013 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
835481d9 1014 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 1015
5a01f2e8 1016 /* Initially set CPU itself as the policy_cpu */
f1625066 1017 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
5a01f2e8 1018
1da177e4 1019 init_completion(&policy->kobj_unregister);
65f27f38 1020 INIT_WORK(&policy->update, handle_update);
1da177e4
LT
1021
1022 /* call driver. From then on the cpufreq must be able
1023 * to accept all calls to ->verify and ->setpolicy for this CPU
1024 */
1c3d85dd 1025 ret = cpufreq_driver->init(policy);
1da177e4 1026 if (ret) {
2d06d8c4 1027 pr_debug("initialization failed\n");
2eaa3e2d 1028 goto err_set_policy_cpu;
1da177e4 1029 }
643ae6e8 1030
fcf80582
VK
1031 /* related cpus should atleast have policy->cpus */
1032 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1033
643ae6e8
VK
1034 /*
1035 * affected cpus must always be the one, which are online. We aren't
1036 * managing offline cpus here.
1037 */
1038 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1039
187d9f4e
MC
1040 policy->user_policy.min = policy->min;
1041 policy->user_policy.max = policy->max;
1da177e4 1042
a1531acd
TR
1043 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1044 CPUFREQ_START, policy);
1045
fcf80582
VK
1046#ifdef CONFIG_HOTPLUG_CPU
1047 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1048 if (gov) {
1049 policy->governor = gov;
1050 pr_debug("Restoring governor %s for cpu %d\n",
1051 policy->governor->name, cpu);
4bfa042c 1052 }
fcf80582 1053#endif
1da177e4 1054
e18f1682
SB
1055 write_lock_irqsave(&cpufreq_driver_lock, flags);
1056 for_each_cpu(j, policy->cpus) {
1057 per_cpu(cpufreq_cpu_data, j) = policy;
1058 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
1059 }
1060 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1061
a82fab29 1062 if (!frozen) {
308b60e7 1063 ret = cpufreq_add_dev_interface(policy, dev);
a82fab29
SB
1064 if (ret)
1065 goto err_out_unregister;
1066 }
8ff69732 1067
9515f4d6
VK
1068 write_lock_irqsave(&cpufreq_driver_lock, flags);
1069 list_add(&policy->policy_list, &cpufreq_policy_list);
1070 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1071
e18f1682
SB
1072 cpufreq_init_policy(policy);
1073
038c5b3e 1074 kobject_uevent(&policy->kobj, KOBJ_ADD);
6eed9404
VK
1075 up_read(&cpufreq_rwsem);
1076
2d06d8c4 1077 pr_debug("initialization complete\n");
87c32271 1078
1da177e4
LT
1079 return 0;
1080
1da177e4 1081err_out_unregister:
0d1857a1 1082 write_lock_irqsave(&cpufreq_driver_lock, flags);
e18f1682 1083 for_each_cpu(j, policy->cpus) {
7a6aedfa 1084 per_cpu(cpufreq_cpu_data, j) = NULL;
e18f1682
SB
1085 if (j != cpu)
1086 per_cpu(cpufreq_policy_cpu, j) = -1;
1087 }
0d1857a1 1088 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1089
2eaa3e2d
VK
1090err_set_policy_cpu:
1091 per_cpu(cpufreq_policy_cpu, cpu) = -1;
e9698cc5 1092 cpufreq_policy_free(policy);
1da177e4 1093nomem_out:
6eed9404
VK
1094 up_read(&cpufreq_rwsem);
1095
1da177e4
LT
1096 return ret;
1097}
1098
a82fab29
SB
1099/**
1100 * cpufreq_add_dev - add a CPU device
1101 *
1102 * Adds the cpufreq interface for a CPU device.
1103 *
1104 * The Oracle says: try running cpufreq registration/unregistration concurrently
1105 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1106 * mess up, but more thorough testing is needed. - Mathieu
1107 */
1108static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1109{
1110 return __cpufreq_add_dev(dev, sif, false);
1111}
1112
b8eed8af
VK
1113static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1114{
1115 int j;
1116
1117 policy->last_cpu = policy->cpu;
1118 policy->cpu = cpu;
1119
3361b7b1 1120 for_each_cpu(j, policy->cpus)
b8eed8af 1121 per_cpu(cpufreq_policy_cpu, j) = cpu;
b8eed8af
VK
1122
1123#ifdef CONFIG_CPU_FREQ_TABLE
1124 cpufreq_frequency_table_update_policy_cpu(policy);
1125#endif
1126 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1127 CPUFREQ_UPDATE_POLICY_CPU, policy);
1128}
1da177e4 1129
3a3e9e06 1130static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
a82fab29 1131 unsigned int old_cpu, bool frozen)
f9ba680d
SB
1132{
1133 struct device *cpu_dev;
1134 unsigned long flags;
1135 int ret;
1136
1137 /* first sibling now owns the new sysfs dir */
3a3e9e06 1138 cpu_dev = get_cpu_device(cpumask_first(policy->cpus));
a82fab29
SB
1139
1140 /* Don't touch sysfs files during light-weight tear-down */
1141 if (frozen)
1142 return cpu_dev->id;
1143
f9ba680d 1144 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
3a3e9e06 1145 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
f9ba680d
SB
1146 if (ret) {
1147 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1148
1149 WARN_ON(lock_policy_rwsem_write(old_cpu));
3a3e9e06 1150 cpumask_set_cpu(old_cpu, policy->cpus);
f9ba680d
SB
1151
1152 write_lock_irqsave(&cpufreq_driver_lock, flags);
3a3e9e06 1153 per_cpu(cpufreq_cpu_data, old_cpu) = policy;
f9ba680d
SB
1154 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1155
1156 unlock_policy_rwsem_write(old_cpu);
1157
3a3e9e06 1158 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
f9ba680d
SB
1159 "cpufreq");
1160
1161 return -EINVAL;
1162 }
1163
1164 return cpu_dev->id;
1165}
1166
1da177e4 1167/**
5a01f2e8 1168 * __cpufreq_remove_dev - remove a CPU device
1da177e4
LT
1169 *
1170 * Removes the cpufreq interface for a CPU device.
5a01f2e8
VP
1171 * Caller should already have policy_rwsem in write mode for this CPU.
1172 * This routine frees the rwsem before returning.
1da177e4 1173 */
bb176f7d 1174static int __cpufreq_remove_dev(struct device *dev,
a82fab29 1175 struct subsys_interface *sif, bool frozen)
1da177e4 1176{
f9ba680d 1177 unsigned int cpu = dev->id, cpus;
3de9bdeb 1178 int new_cpu, ret;
1da177e4 1179 unsigned long flags;
3a3e9e06 1180 struct cpufreq_policy *policy;
499bca9b
AW
1181 struct kobject *kobj;
1182 struct completion *cmp;
1da177e4 1183
b8eed8af 1184 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1da177e4 1185
0d1857a1 1186 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 1187
3a3e9e06 1188 policy = per_cpu(cpufreq_cpu_data, cpu);
2eaa3e2d
VK
1189 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1190
8414809c
SB
1191 /* Save the policy somewhere when doing a light-weight tear-down */
1192 if (frozen)
3a3e9e06 1193 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
8414809c 1194
0d1857a1 1195 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1196
3a3e9e06 1197 if (!policy) {
b8eed8af 1198 pr_debug("%s: No cpu_data found\n", __func__);
1da177e4
LT
1199 return -EINVAL;
1200 }
1da177e4 1201
3de9bdeb
VK
1202 if (cpufreq_driver->target) {
1203 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1204 if (ret) {
1205 pr_err("%s: Failed to stop governor\n", __func__);
1206 return ret;
1207 }
1208 }
1da177e4 1209
084f3493 1210#ifdef CONFIG_HOTPLUG_CPU
1c3d85dd 1211 if (!cpufreq_driver->setpolicy)
fa69e33f 1212 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
3a3e9e06 1213 policy->governor->name, CPUFREQ_NAME_LEN);
1da177e4
LT
1214#endif
1215
2eaa3e2d 1216 WARN_ON(lock_policy_rwsem_write(cpu));
3a3e9e06 1217 cpus = cpumask_weight(policy->cpus);
e4969eba
VK
1218
1219 if (cpus > 1)
3a3e9e06 1220 cpumask_clear_cpu(cpu, policy->cpus);
2eaa3e2d 1221 unlock_policy_rwsem_write(cpu);
084f3493 1222
3a3e9e06 1223 if (cpu != policy->cpu && !frozen) {
73bf0fc2
VK
1224 sysfs_remove_link(&dev->kobj, "cpufreq");
1225 } else if (cpus > 1) {
084f3493 1226
3a3e9e06 1227 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
f9ba680d 1228 if (new_cpu >= 0) {
2eaa3e2d 1229 WARN_ON(lock_policy_rwsem_write(cpu));
3a3e9e06 1230 update_policy_cpu(policy, new_cpu);
499bca9b 1231 unlock_policy_rwsem_write(cpu);
a82fab29
SB
1232
1233 if (!frozen) {
1234 pr_debug("%s: policy Kobject moved to cpu: %d "
1235 "from: %d\n",__func__, new_cpu, cpu);
1236 }
1da177e4
LT
1237 }
1238 }
1da177e4 1239
b8eed8af
VK
1240 /* If cpu is last user of policy, free policy */
1241 if (cpus == 1) {
3de9bdeb
VK
1242 if (cpufreq_driver->target) {
1243 ret = __cpufreq_governor(policy,
1244 CPUFREQ_GOV_POLICY_EXIT);
1245 if (ret) {
1246 pr_err("%s: Failed to exit governor\n",
1247 __func__);
1248 return ret;
1249 }
edab2fbc 1250 }
2a998599 1251
8414809c
SB
1252 if (!frozen) {
1253 lock_policy_rwsem_read(cpu);
3a3e9e06
VK
1254 kobj = &policy->kobj;
1255 cmp = &policy->kobj_unregister;
8414809c
SB
1256 unlock_policy_rwsem_read(cpu);
1257 kobject_put(kobj);
1258
1259 /*
1260 * We need to make sure that the underlying kobj is
1261 * actually not referenced anymore by anybody before we
1262 * proceed with unloading.
1263 */
1264 pr_debug("waiting for dropping of refcount\n");
1265 wait_for_completion(cmp);
1266 pr_debug("wait complete\n");
1267 }
7d26e2d5 1268
8414809c
SB
1269 /*
1270 * Perform the ->exit() even during light-weight tear-down,
1271 * since this is a core component, and is essential for the
1272 * subsequent light-weight ->init() to succeed.
b8eed8af 1273 */
1c3d85dd 1274 if (cpufreq_driver->exit)
3a3e9e06 1275 cpufreq_driver->exit(policy);
27ecddc2 1276
9515f4d6
VK
1277 /* Remove policy from list of active policies */
1278 write_lock_irqsave(&cpufreq_driver_lock, flags);
1279 list_del(&policy->policy_list);
1280 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1281
8414809c 1282 if (!frozen)
3a3e9e06 1283 cpufreq_policy_free(policy);
2a998599 1284 } else {
2a998599 1285 if (cpufreq_driver->target) {
3de9bdeb
VK
1286 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
1287 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
1288 pr_err("%s: Failed to start governor\n",
1289 __func__);
1290 return ret;
1291 }
2a998599 1292 }
27ecddc2 1293 }
1da177e4 1294
2eaa3e2d 1295 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1da177e4
LT
1296 return 0;
1297}
1298
8a25a2fd 1299static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1300{
8a25a2fd 1301 unsigned int cpu = dev->id;
5a01f2e8 1302 int retval;
ec28297a
VP
1303
1304 if (cpu_is_offline(cpu))
1305 return 0;
1306
a82fab29 1307 retval = __cpufreq_remove_dev(dev, sif, false);
5a01f2e8
VP
1308 return retval;
1309}
1310
65f27f38 1311static void handle_update(struct work_struct *work)
1da177e4 1312{
65f27f38
DH
1313 struct cpufreq_policy *policy =
1314 container_of(work, struct cpufreq_policy, update);
1315 unsigned int cpu = policy->cpu;
2d06d8c4 1316 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1317 cpufreq_update_policy(cpu);
1318}
1319
1320/**
bb176f7d
VK
1321 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1322 * in deep trouble.
1da177e4
LT
1323 * @cpu: cpu number
1324 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1325 * @new_freq: CPU frequency the CPU actually runs at
1326 *
29464f28
DJ
1327 * We adjust to current frequency first, and need to clean up later.
1328 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1329 */
e08f5f5b
GS
1330static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1331 unsigned int new_freq)
1da177e4 1332{
b43a7ffb 1333 struct cpufreq_policy *policy;
1da177e4 1334 struct cpufreq_freqs freqs;
b43a7ffb
VK
1335 unsigned long flags;
1336
2d06d8c4 1337 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1338 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1339
1da177e4
LT
1340 freqs.old = old_freq;
1341 freqs.new = new_freq;
b43a7ffb
VK
1342
1343 read_lock_irqsave(&cpufreq_driver_lock, flags);
1344 policy = per_cpu(cpufreq_cpu_data, cpu);
1345 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1346
1347 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1348 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1da177e4
LT
1349}
1350
32ee8c3e 1351/**
4ab70df4 1352 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1353 * @cpu: CPU number
1354 *
1355 * This is the last known freq, without actually getting it from the driver.
1356 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1357 */
1358unsigned int cpufreq_quick_get(unsigned int cpu)
1359{
9e21ba8b 1360 struct cpufreq_policy *policy;
e08f5f5b 1361 unsigned int ret_freq = 0;
95235ca2 1362
1c3d85dd
RW
1363 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1364 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1365
1366 policy = cpufreq_cpu_get(cpu);
95235ca2 1367 if (policy) {
e08f5f5b 1368 ret_freq = policy->cur;
95235ca2
VP
1369 cpufreq_cpu_put(policy);
1370 }
1371
4d34a67d 1372 return ret_freq;
95235ca2
VP
1373}
1374EXPORT_SYMBOL(cpufreq_quick_get);
1375
3d737108
JB
1376/**
1377 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1378 * @cpu: CPU number
1379 *
1380 * Just return the max possible frequency for a given CPU.
1381 */
1382unsigned int cpufreq_quick_get_max(unsigned int cpu)
1383{
1384 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1385 unsigned int ret_freq = 0;
1386
1387 if (policy) {
1388 ret_freq = policy->max;
1389 cpufreq_cpu_put(policy);
1390 }
1391
1392 return ret_freq;
1393}
1394EXPORT_SYMBOL(cpufreq_quick_get_max);
1395
5a01f2e8 1396static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1397{
7a6aedfa 1398 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1399 unsigned int ret_freq = 0;
5800043b 1400
1c3d85dd 1401 if (!cpufreq_driver->get)
4d34a67d 1402 return ret_freq;
1da177e4 1403
1c3d85dd 1404 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1405
e08f5f5b 1406 if (ret_freq && policy->cur &&
1c3d85dd 1407 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e08f5f5b
GS
1408 /* verify no discrepancy between actual and
1409 saved value exists */
1410 if (unlikely(ret_freq != policy->cur)) {
1411 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1412 schedule_work(&policy->update);
1413 }
1414 }
1415
4d34a67d 1416 return ret_freq;
5a01f2e8 1417}
1da177e4 1418
5a01f2e8
VP
1419/**
1420 * cpufreq_get - get the current CPU frequency (in kHz)
1421 * @cpu: CPU number
1422 *
1423 * Get the CPU current (static) CPU frequency
1424 */
1425unsigned int cpufreq_get(unsigned int cpu)
1426{
1427 unsigned int ret_freq = 0;
5a01f2e8 1428
6eed9404
VK
1429 if (!down_read_trylock(&cpufreq_rwsem))
1430 return 0;
5a01f2e8
VP
1431
1432 if (unlikely(lock_policy_rwsem_read(cpu)))
1433 goto out_policy;
1434
1435 ret_freq = __cpufreq_get(cpu);
1436
1437 unlock_policy_rwsem_read(cpu);
1da177e4 1438
5a01f2e8 1439out_policy:
6eed9404
VK
1440 up_read(&cpufreq_rwsem);
1441
4d34a67d 1442 return ret_freq;
1da177e4
LT
1443}
1444EXPORT_SYMBOL(cpufreq_get);
1445
8a25a2fd
KS
1446static struct subsys_interface cpufreq_interface = {
1447 .name = "cpufreq",
1448 .subsys = &cpu_subsys,
1449 .add_dev = cpufreq_add_dev,
1450 .remove_dev = cpufreq_remove_dev,
e00e56df
RW
1451};
1452
42d4dc3f 1453/**
e00e56df
RW
1454 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1455 *
1456 * This function is only executed for the boot processor. The other CPUs
1457 * have been put offline by means of CPU hotplug.
42d4dc3f 1458 */
e00e56df 1459static int cpufreq_bp_suspend(void)
42d4dc3f 1460{
e08f5f5b 1461 int ret = 0;
4bc5d341 1462
e00e56df 1463 int cpu = smp_processor_id();
3a3e9e06 1464 struct cpufreq_policy *policy;
42d4dc3f 1465
2d06d8c4 1466 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1467
e00e56df 1468 /* If there's no policy for the boot CPU, we have nothing to do. */
3a3e9e06
VK
1469 policy = cpufreq_cpu_get(cpu);
1470 if (!policy)
e00e56df 1471 return 0;
42d4dc3f 1472
1c3d85dd 1473 if (cpufreq_driver->suspend) {
3a3e9e06 1474 ret = cpufreq_driver->suspend(policy);
ce6c3997 1475 if (ret)
42d4dc3f 1476 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
3a3e9e06 1477 "step on CPU %u\n", policy->cpu);
42d4dc3f
BH
1478 }
1479
3a3e9e06 1480 cpufreq_cpu_put(policy);
c9060494 1481 return ret;
42d4dc3f
BH
1482}
1483
1da177e4 1484/**
e00e56df 1485 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1486 *
1487 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1488 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1489 * restored. It will verify that the current freq is in sync with
1490 * what we believe it to be. This is a bit later than when it
1491 * should be, but nonethteless it's better than calling
1492 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1493 *
1494 * This function is only executed for the boot CPU. The other CPUs have not
1495 * been turned on yet.
1da177e4 1496 */
e00e56df 1497static void cpufreq_bp_resume(void)
1da177e4 1498{
e08f5f5b 1499 int ret = 0;
4bc5d341 1500
e00e56df 1501 int cpu = smp_processor_id();
3a3e9e06 1502 struct cpufreq_policy *policy;
1da177e4 1503
2d06d8c4 1504 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1505
e00e56df 1506 /* If there's no policy for the boot CPU, we have nothing to do. */
3a3e9e06
VK
1507 policy = cpufreq_cpu_get(cpu);
1508 if (!policy)
e00e56df 1509 return;
1da177e4 1510
1c3d85dd 1511 if (cpufreq_driver->resume) {
3a3e9e06 1512 ret = cpufreq_driver->resume(policy);
1da177e4
LT
1513 if (ret) {
1514 printk(KERN_ERR "cpufreq: resume failed in ->resume "
3a3e9e06 1515 "step on CPU %u\n", policy->cpu);
c9060494 1516 goto fail;
1da177e4
LT
1517 }
1518 }
1519
3a3e9e06 1520 schedule_work(&policy->update);
ce6c3997 1521
c9060494 1522fail:
3a3e9e06 1523 cpufreq_cpu_put(policy);
1da177e4
LT
1524}
1525
e00e56df
RW
1526static struct syscore_ops cpufreq_syscore_ops = {
1527 .suspend = cpufreq_bp_suspend,
1528 .resume = cpufreq_bp_resume,
1da177e4
LT
1529};
1530
9d95046e
BP
1531/**
1532 * cpufreq_get_current_driver - return current driver's name
1533 *
1534 * Return the name string of the currently loaded cpufreq driver
1535 * or NULL, if none.
1536 */
1537const char *cpufreq_get_current_driver(void)
1538{
1c3d85dd
RW
1539 if (cpufreq_driver)
1540 return cpufreq_driver->name;
1541
1542 return NULL;
9d95046e
BP
1543}
1544EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1545
1546/*********************************************************************
1547 * NOTIFIER LISTS INTERFACE *
1548 *********************************************************************/
1549
1550/**
1551 * cpufreq_register_notifier - register a driver with cpufreq
1552 * @nb: notifier function to register
1553 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1554 *
32ee8c3e 1555 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1556 * are notified about clock rate changes (once before and once after
1557 * the transition), or a list of drivers that are notified about
1558 * changes in cpufreq policy.
1559 *
1560 * This function may sleep, and has the same return conditions as
e041c683 1561 * blocking_notifier_chain_register.
1da177e4
LT
1562 */
1563int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1564{
1565 int ret;
1566
d5aaffa9
DB
1567 if (cpufreq_disabled())
1568 return -EINVAL;
1569
74212ca4
CEB
1570 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1571
1da177e4
LT
1572 switch (list) {
1573 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1574 ret = srcu_notifier_chain_register(
e041c683 1575 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1576 break;
1577 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1578 ret = blocking_notifier_chain_register(
1579 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1580 break;
1581 default:
1582 ret = -EINVAL;
1583 }
1da177e4
LT
1584
1585 return ret;
1586}
1587EXPORT_SYMBOL(cpufreq_register_notifier);
1588
1da177e4
LT
1589/**
1590 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1591 * @nb: notifier block to be unregistered
bb176f7d 1592 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1593 *
1594 * Remove a driver from the CPU frequency notifier list.
1595 *
1596 * This function may sleep, and has the same return conditions as
e041c683 1597 * blocking_notifier_chain_unregister.
1da177e4
LT
1598 */
1599int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1600{
1601 int ret;
1602
d5aaffa9
DB
1603 if (cpufreq_disabled())
1604 return -EINVAL;
1605
1da177e4
LT
1606 switch (list) {
1607 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1608 ret = srcu_notifier_chain_unregister(
e041c683 1609 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1610 break;
1611 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1612 ret = blocking_notifier_chain_unregister(
1613 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1614 break;
1615 default:
1616 ret = -EINVAL;
1617 }
1da177e4
LT
1618
1619 return ret;
1620}
1621EXPORT_SYMBOL(cpufreq_unregister_notifier);
1622
1623
1624/*********************************************************************
1625 * GOVERNORS *
1626 *********************************************************************/
1627
1da177e4
LT
1628int __cpufreq_driver_target(struct cpufreq_policy *policy,
1629 unsigned int target_freq,
1630 unsigned int relation)
1631{
1632 int retval = -EINVAL;
7249924e 1633 unsigned int old_target_freq = target_freq;
c32b6b8e 1634
a7b422cd
KRW
1635 if (cpufreq_disabled())
1636 return -ENODEV;
7c30ed53
VK
1637 if (policy->transition_ongoing)
1638 return -EBUSY;
a7b422cd 1639
7249924e
VK
1640 /* Make sure that target_freq is within supported range */
1641 if (target_freq > policy->max)
1642 target_freq = policy->max;
1643 if (target_freq < policy->min)
1644 target_freq = policy->min;
1645
1646 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1647 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1648
1649 if (target_freq == policy->cur)
1650 return 0;
1651
1c3d85dd
RW
1652 if (cpufreq_driver->target)
1653 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1654
1da177e4
LT
1655 return retval;
1656}
1657EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1658
1da177e4
LT
1659int cpufreq_driver_target(struct cpufreq_policy *policy,
1660 unsigned int target_freq,
1661 unsigned int relation)
1662{
f1829e4a 1663 int ret = -EINVAL;
1da177e4 1664
5a01f2e8 1665 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1666 goto fail;
1da177e4
LT
1667
1668 ret = __cpufreq_driver_target(policy, target_freq, relation);
1669
5a01f2e8 1670 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1671
f1829e4a 1672fail:
1da177e4
LT
1673 return ret;
1674}
1675EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1676
153d7f3f 1677/*
153d7f3f
AV
1678 * when "event" is CPUFREQ_GOV_LIMITS
1679 */
1da177e4 1680
e08f5f5b
GS
1681static int __cpufreq_governor(struct cpufreq_policy *policy,
1682 unsigned int event)
1da177e4 1683{
cc993cab 1684 int ret;
6afde10c
TR
1685
1686 /* Only must be defined when default governor is known to have latency
1687 restrictions, like e.g. conservative or ondemand.
1688 That this is the case is already ensured in Kconfig
1689 */
1690#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1691 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1692#else
1693 struct cpufreq_governor *gov = NULL;
1694#endif
1c256245
TR
1695
1696 if (policy->governor->max_transition_latency &&
1697 policy->cpuinfo.transition_latency >
1698 policy->governor->max_transition_latency) {
6afde10c
TR
1699 if (!gov)
1700 return -EINVAL;
1701 else {
1702 printk(KERN_WARNING "%s governor failed, too long"
1703 " transition latency of HW, fallback"
1704 " to %s governor\n",
1705 policy->governor->name,
1706 gov->name);
1707 policy->governor = gov;
1708 }
1c256245 1709 }
1da177e4 1710
fe492f3f
VK
1711 if (event == CPUFREQ_GOV_POLICY_INIT)
1712 if (!try_module_get(policy->governor->owner))
1713 return -EINVAL;
1da177e4 1714
2d06d8c4 1715 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e08f5f5b 1716 policy->cpu, event);
95731ebb
XC
1717
1718 mutex_lock(&cpufreq_governor_lock);
1719 if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1720 (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1721 mutex_unlock(&cpufreq_governor_lock);
1722 return -EBUSY;
1723 }
1724
1725 if (event == CPUFREQ_GOV_STOP)
1726 policy->governor_enabled = false;
1727 else if (event == CPUFREQ_GOV_START)
1728 policy->governor_enabled = true;
1729
1730 mutex_unlock(&cpufreq_governor_lock);
1731
1da177e4
LT
1732 ret = policy->governor->governor(policy, event);
1733
4d5dcc42
VK
1734 if (!ret) {
1735 if (event == CPUFREQ_GOV_POLICY_INIT)
1736 policy->governor->initialized++;
1737 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1738 policy->governor->initialized--;
95731ebb
XC
1739 } else {
1740 /* Restore original values */
1741 mutex_lock(&cpufreq_governor_lock);
1742 if (event == CPUFREQ_GOV_STOP)
1743 policy->governor_enabled = true;
1744 else if (event == CPUFREQ_GOV_START)
1745 policy->governor_enabled = false;
1746 mutex_unlock(&cpufreq_governor_lock);
4d5dcc42 1747 }
b394058f 1748
fe492f3f
VK
1749 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1750 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1da177e4
LT
1751 module_put(policy->governor->owner);
1752
1753 return ret;
1754}
1755
1da177e4
LT
1756int cpufreq_register_governor(struct cpufreq_governor *governor)
1757{
3bcb09a3 1758 int err;
1da177e4
LT
1759
1760 if (!governor)
1761 return -EINVAL;
1762
a7b422cd
KRW
1763 if (cpufreq_disabled())
1764 return -ENODEV;
1765
3fc54d37 1766 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1767
b394058f 1768 governor->initialized = 0;
3bcb09a3
JF
1769 err = -EBUSY;
1770 if (__find_governor(governor->name) == NULL) {
1771 err = 0;
1772 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1773 }
1da177e4 1774
32ee8c3e 1775 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1776 return err;
1da177e4
LT
1777}
1778EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1779
1da177e4
LT
1780void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1781{
90e41bac
PB
1782#ifdef CONFIG_HOTPLUG_CPU
1783 int cpu;
1784#endif
1785
1da177e4
LT
1786 if (!governor)
1787 return;
1788
a7b422cd
KRW
1789 if (cpufreq_disabled())
1790 return;
1791
90e41bac
PB
1792#ifdef CONFIG_HOTPLUG_CPU
1793 for_each_present_cpu(cpu) {
1794 if (cpu_online(cpu))
1795 continue;
1796 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1797 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1798 }
1799#endif
1800
3fc54d37 1801 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1802 list_del(&governor->governor_list);
3fc54d37 1803 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1804 return;
1805}
1806EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1807
1808
1da177e4
LT
1809/*********************************************************************
1810 * POLICY INTERFACE *
1811 *********************************************************************/
1812
1813/**
1814 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1815 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1816 * is written
1da177e4
LT
1817 *
1818 * Reads the current cpufreq policy.
1819 */
1820int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1821{
1822 struct cpufreq_policy *cpu_policy;
1823 if (!policy)
1824 return -EINVAL;
1825
1826 cpu_policy = cpufreq_cpu_get(cpu);
1827 if (!cpu_policy)
1828 return -EINVAL;
1829
d5b73cd8 1830 memcpy(policy, cpu_policy, sizeof(*policy));
1da177e4
LT
1831
1832 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1833 return 0;
1834}
1835EXPORT_SYMBOL(cpufreq_get_policy);
1836
153d7f3f 1837/*
e08f5f5b
GS
1838 * data : current policy.
1839 * policy : policy to be set.
153d7f3f 1840 */
3a3e9e06
VK
1841static int __cpufreq_set_policy(struct cpufreq_policy *policy,
1842 struct cpufreq_policy *new_policy)
1da177e4 1843{
7bd353a9 1844 int ret = 0, failed = 1;
1da177e4 1845
3a3e9e06
VK
1846 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
1847 new_policy->min, new_policy->max);
1da177e4 1848
d5b73cd8 1849 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
1da177e4 1850
3a3e9e06 1851 if (new_policy->min > policy->max || new_policy->max < policy->min) {
9c9a43ed
MD
1852 ret = -EINVAL;
1853 goto error_out;
1854 }
1855
1da177e4 1856 /* verify the cpu speed can be set within this limit */
3a3e9e06 1857 ret = cpufreq_driver->verify(new_policy);
1da177e4
LT
1858 if (ret)
1859 goto error_out;
1860
1da177e4 1861 /* adjust if necessary - all reasons */
e041c683 1862 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 1863 CPUFREQ_ADJUST, new_policy);
1da177e4
LT
1864
1865 /* adjust if necessary - hardware incompatibility*/
e041c683 1866 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 1867 CPUFREQ_INCOMPATIBLE, new_policy);
1da177e4 1868
bb176f7d
VK
1869 /*
1870 * verify the cpu speed can be set within this limit, which might be
1871 * different to the first one
1872 */
3a3e9e06 1873 ret = cpufreq_driver->verify(new_policy);
e041c683 1874 if (ret)
1da177e4 1875 goto error_out;
1da177e4
LT
1876
1877 /* notification of the new policy */
e041c683 1878 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 1879 CPUFREQ_NOTIFY, new_policy);
1da177e4 1880
3a3e9e06
VK
1881 policy->min = new_policy->min;
1882 policy->max = new_policy->max;
1da177e4 1883
2d06d8c4 1884 pr_debug("new min and max freqs are %u - %u kHz\n",
3a3e9e06 1885 policy->min, policy->max);
1da177e4 1886
1c3d85dd 1887 if (cpufreq_driver->setpolicy) {
3a3e9e06 1888 policy->policy = new_policy->policy;
2d06d8c4 1889 pr_debug("setting range\n");
3a3e9e06 1890 ret = cpufreq_driver->setpolicy(new_policy);
1da177e4 1891 } else {
3a3e9e06 1892 if (new_policy->governor != policy->governor) {
1da177e4 1893 /* save old, working values */
3a3e9e06 1894 struct cpufreq_governor *old_gov = policy->governor;
1da177e4 1895
2d06d8c4 1896 pr_debug("governor switch\n");
1da177e4
LT
1897
1898 /* end old governor */
3a3e9e06
VK
1899 if (policy->governor) {
1900 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1901 unlock_policy_rwsem_write(new_policy->cpu);
1902 __cpufreq_governor(policy,
7bd353a9 1903 CPUFREQ_GOV_POLICY_EXIT);
3a3e9e06 1904 lock_policy_rwsem_write(new_policy->cpu);
7bd353a9 1905 }
1da177e4
LT
1906
1907 /* start new governor */
3a3e9e06
VK
1908 policy->governor = new_policy->governor;
1909 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
1910 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
7bd353a9 1911 failed = 0;
955ef483 1912 } else {
3a3e9e06
VK
1913 unlock_policy_rwsem_write(new_policy->cpu);
1914 __cpufreq_governor(policy,
7bd353a9 1915 CPUFREQ_GOV_POLICY_EXIT);
3a3e9e06 1916 lock_policy_rwsem_write(new_policy->cpu);
955ef483 1917 }
7bd353a9
VK
1918 }
1919
1920 if (failed) {
1da177e4 1921 /* new governor failed, so re-start old one */
2d06d8c4 1922 pr_debug("starting governor %s failed\n",
3a3e9e06 1923 policy->governor->name);
1da177e4 1924 if (old_gov) {
3a3e9e06
VK
1925 policy->governor = old_gov;
1926 __cpufreq_governor(policy,
7bd353a9 1927 CPUFREQ_GOV_POLICY_INIT);
3a3e9e06 1928 __cpufreq_governor(policy,
e08f5f5b 1929 CPUFREQ_GOV_START);
1da177e4
LT
1930 }
1931 ret = -EINVAL;
1932 goto error_out;
1933 }
1934 /* might be a policy change, too, so fall through */
1935 }
2d06d8c4 1936 pr_debug("governor: change or update limits\n");
3de9bdeb 1937 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1da177e4
LT
1938 }
1939
7d5e350f 1940error_out:
1da177e4
LT
1941 return ret;
1942}
1943
1da177e4
LT
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 *
 * Returns 0 on success, -ENODEV if @cpu has no policy, -EINVAL if the
 * policy rwsem cannot be taken for writing.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	/* cpufreq_cpu_get() takes a reference; dropped via cpufreq_cpu_put() */
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	/*
	 * Start from a snapshot of the live policy, then overlay the
	 * user-requested limits/governor so they are re-applied by
	 * __cpufreq_set_policy() below.
	 */
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;
	new_policy.policy = policy->user_policy.policy;
	new_policy.governor = policy->user_policy.governor;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (!policy->cur) {
			/* first valid reading from the driver; adopt it */
			pr_debug("Driver did not initialize current freq");
			policy->cur = new_policy.cur;
		} else {
			/*
			 * Frequency changed outside our control; let
			 * cpufreq_out_of_sync() reconcile bookkeeping
			 * (only meaningful for ->target drivers).
			 */
			if (policy->cur != new_policy.cur && cpufreq_driver->target)
				cpufreq_out_of_sync(cpu, policy->cur,
								new_policy.cur);
		}
	}

	ret = __cpufreq_set_policy(policy, &new_policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
2000
2760984f 2001static int cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
2002 unsigned long action, void *hcpu)
2003{
2004 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 2005 struct device *dev;
5302c3fb 2006 bool frozen = false;
c32b6b8e 2007
8a25a2fd
KS
2008 dev = get_cpu_device(cpu);
2009 if (dev) {
5302c3fb
SB
2010
2011 if (action & CPU_TASKS_FROZEN)
2012 frozen = true;
2013
2014 switch (action & ~CPU_TASKS_FROZEN) {
c32b6b8e 2015 case CPU_ONLINE:
5302c3fb 2016 __cpufreq_add_dev(dev, NULL, frozen);
23d32899 2017 cpufreq_update_policy(cpu);
c32b6b8e 2018 break;
5302c3fb 2019
c32b6b8e 2020 case CPU_DOWN_PREPARE:
5302c3fb 2021 __cpufreq_remove_dev(dev, NULL, frozen);
c32b6b8e 2022 break;
5302c3fb 2023
5a01f2e8 2024 case CPU_DOWN_FAILED:
5302c3fb 2025 __cpufreq_add_dev(dev, NULL, frozen);
c32b6b8e
AR
2026 break;
2027 }
2028 }
2029 return NOTIFY_OK;
2030}
2031
/*
 * Hotplug notifier hooking cpufreq_cpu_callback() into CPU online/offline
 * events.  NOTE(review): __refdata presumably suppresses section-mismatch
 * warnings for the callback reference — confirm against init annotations.
 */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
2035
2036/*********************************************************************
2037 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2038 *********************************************************************/
2039
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 * Also returns -ENODEV when cpufreq is disabled, -EINVAL when mandatory
 * callbacks are missing, and propagates subsys registration failures.
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* a driver must verify limits and init policies, and provide either
	 * setpolicy (full-autonomy hardware) or target (freq-table style) */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* publish the driver pointer under the writer lock; only one driver
	 * may be registered at a time */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* triggers ->init() for present CPUs via the subsys interface */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	/* unwind in reverse order of setup */
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2111
1da177e4
LT
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	/* caller must pass the same driver it registered */
	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* tear down sysfs interface and hotplug hook before clearing the
	 * driver pointer */
	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	/* lock order: sleepable cpufreq_rwsem first, then the spinlock */
	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2143
/*
 * Core initialisation: set up per-CPU state and the global kobject before
 * any cpufreq driver can register.  Runs at core_initcall time.
 */
static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	for_each_possible_cpu(cpu) {
		/* -1 marks "no policy-managing CPU assigned yet" */
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	/* global kobject shared by drivers/governors; allocation failure
	 * this early is unrecoverable, hence BUG_ON */
	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);
This page took 0.795659 seconds and 5 git commands to generate.