cpufreq: suspend governors on system suspend/hibernate
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
bb176f7d 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
1da177e4 7 *
c32b6b8e 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 9 * Added handling for CPU hotplug
8ff69732
DJ
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 12 *
1da177e4
LT
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
1da177e4
LT
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
5ff0a268 20#include <linux/cpu.h>
1da177e4
LT
21#include <linux/cpufreq.h>
22#include <linux/delay.h>
1da177e4 23#include <linux/device.h>
5ff0a268
VK
24#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
3fc54d37 27#include <linux/mutex.h>
5ff0a268 28#include <linux/slab.h>
5a87182a 29#include <linux/suspend.h>
e00e56df 30#include <linux/syscore_ops.h>
5ff0a268 31#include <linux/tick.h>
6f4f2723
TR
32#include <trace/events/power.h>
33
1da177e4 34/**
cd878479 35 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
36 * level driver of CPUFreq support, and its spinlock. This lock
37 * also protects the cpufreq_cpu_data array.
38 */
1c3d85dd 39static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 40static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
8414809c 41static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
bb176f7d
VK
42static DEFINE_RWLOCK(cpufreq_driver_lock);
43static DEFINE_MUTEX(cpufreq_governor_lock);
c88a1f8b 44static LIST_HEAD(cpufreq_policy_list);
bb176f7d 45
084f3493
TR
46#ifdef CONFIG_HOTPLUG_CPU
47/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 48static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 49#endif
1da177e4 50
5a87182a
VK
51/* Flag to suspend/resume CPUFreq governors */
52static bool cpufreq_suspended;
53
9c0ebcf7
VK
54static inline bool has_target(void)
55{
56 return cpufreq_driver->target_index || cpufreq_driver->target;
57}
58
6eed9404
VK
59/*
60 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
61 * sections
62 */
63static DECLARE_RWSEM(cpufreq_rwsem);
64
1da177e4 65/* internal prototypes */
29464f28
DJ
66static int __cpufreq_governor(struct cpufreq_policy *policy,
67 unsigned int event);
5a01f2e8 68static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 69static void handle_update(struct work_struct *work);
1da177e4
LT
70
71/**
32ee8c3e
DJ
72 * Two notifier lists: the "policy" list is involved in the
73 * validation process for a new CPU frequency policy; the
1da177e4
LT
74 * "transition" list for kernel code that needs to handle
75 * changes to devices when the CPU clock speed changes.
76 * The mutex locks both lists.
77 */
e041c683 78static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 79static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 80
/* Set once the SRCU notifier head below has been initialized; presumably
 * consulted by notifier registration to catch use-before-init — verify. */
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 89
/* Non-zero after disable_cpufreq(); checked throughout to bail out early. */
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
/* List of registered governors and the mutex serializing access to it. */
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 101
4d5dcc42
VK
102bool have_governor_per_policy(void)
103{
0b981e70 104 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
4d5dcc42 105}
3f869d6d 106EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 107
944e9a03
VK
108struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
109{
110 if (have_governor_per_policy())
111 return &policy->kobj;
112 else
113 return cpufreq_global_kobject;
114}
115EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
116
72a4ce34
VK
117static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
118{
119 u64 idle_time;
120 u64 cur_wall_time;
121 u64 busy_time;
122
123 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
124
125 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
127 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
128 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
129 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
130 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
131
132 idle_time = cur_wall_time - busy_time;
133 if (wall)
134 *wall = cputime_to_usecs(cur_wall_time);
135
136 return cputime_to_usecs(idle_time);
137}
138
139u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
140{
141 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
142
143 if (idle_time == -1ULL)
144 return get_cpu_idle_time_jiffy(cpu, wall);
145 else if (!io_busy)
146 idle_time += get_cpu_iowait_time_us(cpu, wall);
147
148 return idle_time;
149}
150EXPORT_SYMBOL_GPL(get_cpu_idle_time);
151
70e9e778
VK
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do following:
 * - validate & show freq table passed
 * - set policies transition latency
 * - policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all
	 * processors share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
182
/**
 * cpufreq_cpu_get - acquire a reference to a CPU's policy object
 * @cpu: CPU number whose policy is requested
 *
 * Returns the policy with its kobject reference count raised and
 * cpufreq_rwsem held for reading, or NULL on failure.  A successful call
 * must be balanced by cpufreq_cpu_put(), which drops both.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
		return NULL;

	/* Keep the driver module from unloading while the policy is used. */
	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = per_cpu(cpufreq_cpu_data, cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* On failure release the rwsem here; on success cpufreq_cpu_put()
	 * releases it. */
	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
212
/**
 * cpufreq_cpu_put - release the reference taken by cpufreq_cpu_get()
 * @policy: policy previously returned by cpufreq_cpu_get()
 *
 * Drops the policy's kobject reference, then releases cpufreq_rwsem.
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	if (cpufreq_disabled())
		return;

	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
222
1da177e4
LT
223/*********************************************************************
224 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
225 *********************************************************************/
226
/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
/* loops_per_jiffy captured the first time through, and the frequency
 * (kHz) it was captured at; later transitions scale relative to these. */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	/* Driver says delay loops are frequency-independent: nothing to do. */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	/* Rescale on completed changes and on suspend/resume transitions. */
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: per-CPU loops_per_jiffy is handled by the architecture instead. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
264
/*
 * Notify one CPU's transition (freqs->cpu) to the transition notifier
 * chain and adjust_jiffies().  Called once with CPUFREQ_PRECHANGE and once
 * with CPUFREQ_POSTCHANGE per affected CPU.  Must run with IRQs enabled
 * (SRCU notifiers may sleep).
 */
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				/* Trust the core's value over the driver's. */
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* Record the new frequency as the policy's current one. */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
bb176f7d 310
b43a7ffb
VK
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	/* Notify per CPU in the policy; freqs->cpu is set by the iterator. */
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
326
327
1da177e4
LT
328/*********************************************************************
329 * SYSFS INTERFACE *
330 *********************************************************************/
331
3bcb09a3
JF
332static struct cpufreq_governor *__find_governor(const char *str_governor)
333{
334 struct cpufreq_governor *t;
335
336 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 337 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
338 return t;
339
340 return NULL;
341}
342
1da177e4
LT
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers the string selects one of the two built-in
 * policies (*policy); for targeting drivers it resolves a registered
 * governor (*governor), loading its module on demand.  Returns 0 on
 * success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (has_target()) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/* Drop the mutex: request_module() may sleep and the
			 * loaded module re-takes it to register itself. */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4 391
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

/* Generate a sysfs show() helper printing one unsigned int policy field. */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
413
037ce839 414static int cpufreq_set_policy(struct cpufreq_policy *policy,
3a3e9e06 415 struct cpufreq_policy *new_policy);
7970e08b 416
1da177e4
LT
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 *
 * Generates a store() helper that parses one unsigned int, applies it via
 * cpufreq_set_policy() and records the user's request in user_policy.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name			\
(struct cpufreq_policy *policy, const char *buf, size_t count)	\
{							\
	int ret;					\
	struct cpufreq_policy new_policy;		\
							\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);	\
	if (ret)					\
		return -EINVAL;				\
							\
	ret = sscanf(buf, "%u", &new_policy.object);	\
	if (ret != 1)					\
		return -EINVAL;				\
							\
	ret = cpufreq_set_policy(policy, &new_policy);	\
	policy->user_policy.object = policy->object;	\
							\
	return ret ? ret : count;			\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
1da177e4
LT
443
444/**
445 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
446 */
905d77cd
DJ
447static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
448 char *buf)
1da177e4 449{
5a01f2e8 450 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
451 if (!cur_freq)
452 return sprintf(buf, "<unknown>");
453 return sprintf(buf, "%u\n", cur_freq);
454}
455
1da177e4
LT
456/**
457 * show_scaling_governor - show the current policy for the specified CPU
458 */
905d77cd 459static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 460{
29464f28 461 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
462 return sprintf(buf, "powersave\n");
463 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
464 return sprintf(buf, "performance\n");
465 else if (policy->governor)
4b972f0b 466 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 467 policy->governor->name);
1da177e4
LT
468 return -EINVAL;
469}
470
1da177e4
LT
/**
 * store_scaling_governor - store policy for the specified CPU
 *
 * Parses a governor (or built-in policy) name from @buf and applies it via
 * cpufreq_set_policy().  The user's choice is recorded in user_policy even
 * when applying it fails partway.
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	/* %15s: bounded read so str_governor[16] cannot overflow. */
	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
503
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
511
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	/* setpolicy drivers only offer the two built-in policies. */
	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		/* Stop before the next name could overrun the page. */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 536
f4fd3797 537ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
538{
539 ssize_t i = 0;
540 unsigned int cpu;
541
835481d9 542 for_each_cpu(cpu, mask) {
1da177e4
LT
543 if (i)
544 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
545 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
546 if (i >= (PAGE_SIZE - 5))
29464f28 547 break;
1da177e4
LT
548 }
549 i += sprintf(&buf[i], "\n");
550 return i;
551}
f4fd3797 552EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
1da177e4 553
e8628dd0
DW
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
570
9e76988e 571static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 572 const char *buf, size_t count)
9e76988e
VP
573{
574 unsigned int freq = 0;
575 unsigned int ret;
576
879000f9 577 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
578 return -EINVAL;
579
580 ret = sscanf(buf, "%u", &freq);
581 if (ret != 1)
582 return -EINVAL;
583
584 policy->governor->store_setspeed(policy, freq);
585
586 return count;
587}
588
/* Show the governor's setspeed value, if the governor provides one. */
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
1da177e4 596
e2f74f35 597/**
8bf1ac72 598 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
599 */
600static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
601{
602 unsigned int limit;
603 int ret;
1c3d85dd
RW
604 if (cpufreq_driver->bios_limit) {
605 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
e2f74f35
TR
606 if (!ret)
607 return sprintf(buf, "%u\n", limit);
608 }
609 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
610}
611
6dad2a29
BP
/* sysfs attribute objects; the _ro/_rw macros bind them to the matching
 * show_*/store_* helpers defined above. */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 626
/* Attributes created in every policy's sysfs directory by default. */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
641
29464f28
DJ
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

/* Generic sysfs read: dispatch to the attribute's show() under the policy
 * rwsem, with cpufreq_rwsem pinning the driver module. */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}
666
905d77cd
DJ
/* Generic sysfs write: dispatch to the attribute's store() with the policy
 * rwsem held for writing.  get_online_cpus() keeps the CPU from going away
 * (and its policy from being torn down) for the duration. */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}
697
/* kobject release callback: the last reference is gone, so wake whoever is
 * blocked in wait_for_completion(&policy->kobj_unregister). */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

/* kobject type backing each policy's sysfs directory. */
static struct kobj_type ktype_cpufreq = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
	.release = cpufreq_sysfs_release,
};
715
2361be23
VK
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* Usage count for the global cpufreq sysfs directory below. */
static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	/* First user makes the global directory visible in sysfs. */
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
	/* Last user removes the directory again. */
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

/* Create @attr in the global directory, taking a usage reference that is
 * dropped again if creation fails. */
int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

/* Remove @attr and drop the reference taken by cpufreq_sysfs_create_file(). */
void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
758
/* symlink affected CPUs: every CPU in the policy except the owner gets a
 * "cpufreq" symlink pointing at the owner's policy directory. */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct device *cpu_dev;

		/* The owning CPU has the real directory, not a link. */
		if (j == policy->cpu)
			continue;

		pr_debug("Adding link for CPU: %u\n", j);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			break;
	}
	return ret;
}
780
/*
 * Create the policy's sysfs directory under @dev, populate it with driver
 * attributes plus the conditional core attributes, and link sibling CPUs.
 * On failure the kobject is put and we wait for its release before
 * returning, so the caller may free the policy safely.
 */
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	/* cpuinfo_cur_freq only makes sense if the driver can read HW. */
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (has_target()) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	ret = cpufreq_add_dev_symlink(policy);
	if (ret)
		goto err_out_kobj_put;

	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
828
/*
 * Apply the default policy/governor to a freshly initialized policy and
 * record the result as the user policy.  On failure the driver's exit()
 * hook (if any) is invoked to undo its init().
 */
static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));
	/* assure that the starting sequence is run in cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}
849
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Attach @cpu to an already existing @policy (shared-clock siblings).
 * The governor is stopped around the cpumask/per-cpu-data update and
 * restarted afterwards.  @frozen means light-weight (resume-path) init,
 * where the sysfs link already exists and must not be recreated.
 */
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev,
				  bool frozen)
{
	int ret = 0;
	unsigned long flags;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	up_write(&policy->rwsem);

	if (has_target()) {
		if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
		    (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	/* Don't touch sysfs links during light-weight init */
	if (!frozen)
		ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");

	return ret;
}
#endif
1da177e4 891
8414809c
SB
892static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
893{
894 struct cpufreq_policy *policy;
895 unsigned long flags;
896
44871c9c 897 read_lock_irqsave(&cpufreq_driver_lock, flags);
8414809c
SB
898
899 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
900
44871c9c 901 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
8414809c
SB
902
903 return policy;
904}
905
e9698cc5
SB
/*
 * Allocate and zero a policy object plus its two cpumasks, and initialize
 * its list head and rwsem.  Returns NULL on any allocation failure, with
 * partial allocations unwound via the goto chain.
 */
static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
	struct cpufreq_policy *policy;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);

	return policy;

err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
932
/* Counterpart of cpufreq_policy_alloc(): release cpumasks, then the policy. */
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
939
0d66b91e
SB
/*
 * Transfer ownership of @policy to @cpu, recording the previous owner in
 * last_cpu.  The switch happens under the policy rwsem; the frequency
 * table and policy notifiers are then told about the move.
 */
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	/* Calling this for the current owner is a caller bug. */
	if (WARN_ON(cpu == policy->cpu))
		return;

	down_write(&policy->rwsem);

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	up_write(&policy->rwsem);

	cpufreq_frequency_table_update_policy_cpu(policy);
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
956
a82fab29
SB
957static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
958 bool frozen)
1da177e4 959{
fcf80582 960 unsigned int j, cpu = dev->id;
65922465 961 int ret = -ENOMEM;
1da177e4 962 struct cpufreq_policy *policy;
1da177e4 963 unsigned long flags;
90e41bac 964#ifdef CONFIG_HOTPLUG_CPU
1b274294 965 struct cpufreq_policy *tpolicy;
fcf80582 966 struct cpufreq_governor *gov;
90e41bac 967#endif
1da177e4 968
c32b6b8e
AR
969 if (cpu_is_offline(cpu))
970 return 0;
971
2d06d8c4 972 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
973
974#ifdef CONFIG_SMP
975 /* check whether a different CPU already registered this
976 * CPU because it is in the same boat. */
977 policy = cpufreq_cpu_get(cpu);
978 if (unlikely(policy)) {
8ff69732 979 cpufreq_cpu_put(policy);
1da177e4
LT
980 return 0;
981 }
5025d628 982#endif
fcf80582 983
6eed9404
VK
984 if (!down_read_trylock(&cpufreq_rwsem))
985 return 0;
986
fcf80582
VK
987#ifdef CONFIG_HOTPLUG_CPU
988 /* Check if this cpu was hot-unplugged earlier and has siblings */
0d1857a1 989 read_lock_irqsave(&cpufreq_driver_lock, flags);
1b274294
VK
990 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
991 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
0d1857a1 992 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1b274294 993 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
6eed9404
VK
994 up_read(&cpufreq_rwsem);
995 return ret;
2eaa3e2d 996 }
fcf80582 997 }
0d1857a1 998 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
999#endif
1000
8414809c
SB
1001 if (frozen)
1002 /* Restore the saved policy when doing light-weight init */
1003 policy = cpufreq_policy_restore(cpu);
1004 else
1005 policy = cpufreq_policy_alloc();
1006
059019a3 1007 if (!policy)
1da177e4 1008 goto nomem_out;
059019a3 1009
0d66b91e
SB
1010
1011 /*
1012 * In the resume path, since we restore a saved policy, the assignment
1013 * to policy->cpu is like an update of the existing policy, rather than
1014 * the creation of a brand new one. So we need to perform this update
1015 * by invoking update_policy_cpu().
1016 */
1017 if (frozen && cpu != policy->cpu)
1018 update_policy_cpu(policy, cpu);
1019 else
1020 policy->cpu = cpu;
1021
65922465 1022 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
835481d9 1023 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 1024
1da177e4 1025 init_completion(&policy->kobj_unregister);
65f27f38 1026 INIT_WORK(&policy->update, handle_update);
1da177e4
LT
1027
1028 /* call driver. From then on the cpufreq must be able
1029 * to accept all calls to ->verify and ->setpolicy for this CPU
1030 */
1c3d85dd 1031 ret = cpufreq_driver->init(policy);
1da177e4 1032 if (ret) {
2d06d8c4 1033 pr_debug("initialization failed\n");
2eaa3e2d 1034 goto err_set_policy_cpu;
1da177e4 1035 }
643ae6e8 1036
da60ce9f
VK
1037 if (cpufreq_driver->get) {
1038 policy->cur = cpufreq_driver->get(policy->cpu);
1039 if (!policy->cur) {
1040 pr_err("%s: ->get() failed\n", __func__);
1041 goto err_get_freq;
1042 }
1043 }
1044
fcf80582
VK
1045 /* related cpus should atleast have policy->cpus */
1046 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1047
643ae6e8
VK
1048 /*
1049 * affected cpus must always be the one, which are online. We aren't
1050 * managing offline cpus here.
1051 */
1052 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1053
187d9f4e
MC
1054 policy->user_policy.min = policy->min;
1055 policy->user_policy.max = policy->max;
1da177e4 1056
a1531acd
TR
1057 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1058 CPUFREQ_START, policy);
1059
fcf80582
VK
1060#ifdef CONFIG_HOTPLUG_CPU
1061 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1062 if (gov) {
1063 policy->governor = gov;
1064 pr_debug("Restoring governor %s for cpu %d\n",
1065 policy->governor->name, cpu);
4bfa042c 1066 }
fcf80582 1067#endif
1da177e4 1068
e18f1682 1069 write_lock_irqsave(&cpufreq_driver_lock, flags);
474deff7 1070 for_each_cpu(j, policy->cpus)
e18f1682 1071 per_cpu(cpufreq_cpu_data, j) = policy;
e18f1682
SB
1072 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1073
a82fab29 1074 if (!frozen) {
308b60e7 1075 ret = cpufreq_add_dev_interface(policy, dev);
a82fab29
SB
1076 if (ret)
1077 goto err_out_unregister;
1078 }
8ff69732 1079
9515f4d6
VK
1080 write_lock_irqsave(&cpufreq_driver_lock, flags);
1081 list_add(&policy->policy_list, &cpufreq_policy_list);
1082 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1083
e18f1682
SB
1084 cpufreq_init_policy(policy);
1085
038c5b3e 1086 kobject_uevent(&policy->kobj, KOBJ_ADD);
6eed9404
VK
1087 up_read(&cpufreq_rwsem);
1088
2d06d8c4 1089 pr_debug("initialization complete\n");
87c32271 1090
1da177e4
LT
1091 return 0;
1092
1da177e4 1093err_out_unregister:
0d1857a1 1094 write_lock_irqsave(&cpufreq_driver_lock, flags);
474deff7 1095 for_each_cpu(j, policy->cpus)
7a6aedfa 1096 per_cpu(cpufreq_cpu_data, j) = NULL;
0d1857a1 1097 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1098
da60ce9f
VK
1099err_get_freq:
1100 if (cpufreq_driver->exit)
1101 cpufreq_driver->exit(policy);
2eaa3e2d 1102err_set_policy_cpu:
e9698cc5 1103 cpufreq_policy_free(policy);
1da177e4 1104nomem_out:
6eed9404
VK
1105 up_read(&cpufreq_rwsem);
1106
1da177e4
LT
1107 return ret;
1108}
1109
a82fab29
SB
1110/**
1111 * cpufreq_add_dev - add a CPU device
1112 *
1113 * Adds the cpufreq interface for a CPU device.
1114 *
1115 * The Oracle says: try running cpufreq registration/unregistration concurrently
1116 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1117 * mess up, but more thorough testing is needed. - Mathieu
1118 */
1119static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1120{
1121 return __cpufreq_add_dev(dev, sif, false);
1122}
1123
3a3e9e06 1124static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
a82fab29 1125 unsigned int old_cpu, bool frozen)
f9ba680d
SB
1126{
1127 struct device *cpu_dev;
f9ba680d
SB
1128 int ret;
1129
1130 /* first sibling now owns the new sysfs dir */
9c8f1ee4 1131 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
a82fab29
SB
1132
1133 /* Don't touch sysfs files during light-weight tear-down */
1134 if (frozen)
1135 return cpu_dev->id;
1136
f9ba680d 1137 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
3a3e9e06 1138 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
f9ba680d
SB
1139 if (ret) {
1140 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1141
ad7722da 1142 down_write(&policy->rwsem);
3a3e9e06 1143 cpumask_set_cpu(old_cpu, policy->cpus);
ad7722da 1144 up_write(&policy->rwsem);
f9ba680d 1145
3a3e9e06 1146 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
f9ba680d
SB
1147 "cpufreq");
1148
1149 return -EINVAL;
1150 }
1151
1152 return cpu_dev->id;
1153}
1154
cedb70af
SB
1155static int __cpufreq_remove_dev_prepare(struct device *dev,
1156 struct subsys_interface *sif,
1157 bool frozen)
1da177e4 1158{
f9ba680d 1159 unsigned int cpu = dev->id, cpus;
3de9bdeb 1160 int new_cpu, ret;
1da177e4 1161 unsigned long flags;
3a3e9e06 1162 struct cpufreq_policy *policy;
1da177e4 1163
b8eed8af 1164 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1da177e4 1165
0d1857a1 1166 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 1167
3a3e9e06 1168 policy = per_cpu(cpufreq_cpu_data, cpu);
2eaa3e2d 1169
8414809c
SB
1170 /* Save the policy somewhere when doing a light-weight tear-down */
1171 if (frozen)
3a3e9e06 1172 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
8414809c 1173
0d1857a1 1174 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1175
3a3e9e06 1176 if (!policy) {
b8eed8af 1177 pr_debug("%s: No cpu_data found\n", __func__);
1da177e4
LT
1178 return -EINVAL;
1179 }
1da177e4 1180
9c0ebcf7 1181 if (has_target()) {
3de9bdeb
VK
1182 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1183 if (ret) {
1184 pr_err("%s: Failed to stop governor\n", __func__);
1185 return ret;
1186 }
1187 }
1da177e4 1188
084f3493 1189#ifdef CONFIG_HOTPLUG_CPU
1c3d85dd 1190 if (!cpufreq_driver->setpolicy)
fa69e33f 1191 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
3a3e9e06 1192 policy->governor->name, CPUFREQ_NAME_LEN);
1da177e4
LT
1193#endif
1194
ad7722da 1195 down_read(&policy->rwsem);
3a3e9e06 1196 cpus = cpumask_weight(policy->cpus);
ad7722da 1197 up_read(&policy->rwsem);
084f3493 1198
61173f25
SB
1199 if (cpu != policy->cpu) {
1200 if (!frozen)
1201 sysfs_remove_link(&dev->kobj, "cpufreq");
73bf0fc2 1202 } else if (cpus > 1) {
3a3e9e06 1203 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
f9ba680d 1204 if (new_cpu >= 0) {
3a3e9e06 1205 update_policy_cpu(policy, new_cpu);
a82fab29
SB
1206
1207 if (!frozen) {
75949c9a
VK
1208 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1209 __func__, new_cpu, cpu);
a82fab29 1210 }
1da177e4
LT
1211 }
1212 }
1da177e4 1213
cedb70af
SB
1214 return 0;
1215}
1216
1217static int __cpufreq_remove_dev_finish(struct device *dev,
1218 struct subsys_interface *sif,
1219 bool frozen)
1220{
1221 unsigned int cpu = dev->id, cpus;
1222 int ret;
1223 unsigned long flags;
1224 struct cpufreq_policy *policy;
1225 struct kobject *kobj;
1226 struct completion *cmp;
1227
1228 read_lock_irqsave(&cpufreq_driver_lock, flags);
1229 policy = per_cpu(cpufreq_cpu_data, cpu);
1230 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1231
1232 if (!policy) {
1233 pr_debug("%s: No cpu_data found\n", __func__);
1234 return -EINVAL;
1235 }
1236
ad7722da 1237 down_write(&policy->rwsem);
cedb70af 1238 cpus = cpumask_weight(policy->cpus);
9c8f1ee4
VK
1239
1240 if (cpus > 1)
1241 cpumask_clear_cpu(cpu, policy->cpus);
ad7722da 1242 up_write(&policy->rwsem);
cedb70af 1243
b8eed8af
VK
1244 /* If cpu is last user of policy, free policy */
1245 if (cpus == 1) {
9c0ebcf7 1246 if (has_target()) {
3de9bdeb
VK
1247 ret = __cpufreq_governor(policy,
1248 CPUFREQ_GOV_POLICY_EXIT);
1249 if (ret) {
1250 pr_err("%s: Failed to exit governor\n",
1251 __func__);
1252 return ret;
1253 }
edab2fbc 1254 }
2a998599 1255
8414809c 1256 if (!frozen) {
ad7722da 1257 down_read(&policy->rwsem);
3a3e9e06
VK
1258 kobj = &policy->kobj;
1259 cmp = &policy->kobj_unregister;
ad7722da 1260 up_read(&policy->rwsem);
8414809c
SB
1261 kobject_put(kobj);
1262
1263 /*
1264 * We need to make sure that the underlying kobj is
1265 * actually not referenced anymore by anybody before we
1266 * proceed with unloading.
1267 */
1268 pr_debug("waiting for dropping of refcount\n");
1269 wait_for_completion(cmp);
1270 pr_debug("wait complete\n");
1271 }
7d26e2d5 1272
8414809c
SB
1273 /*
1274 * Perform the ->exit() even during light-weight tear-down,
1275 * since this is a core component, and is essential for the
1276 * subsequent light-weight ->init() to succeed.
b8eed8af 1277 */
1c3d85dd 1278 if (cpufreq_driver->exit)
3a3e9e06 1279 cpufreq_driver->exit(policy);
27ecddc2 1280
9515f4d6
VK
1281 /* Remove policy from list of active policies */
1282 write_lock_irqsave(&cpufreq_driver_lock, flags);
1283 list_del(&policy->policy_list);
1284 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1285
8414809c 1286 if (!frozen)
3a3e9e06 1287 cpufreq_policy_free(policy);
2a998599 1288 } else {
9c0ebcf7 1289 if (has_target()) {
3de9bdeb
VK
1290 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
1291 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
1292 pr_err("%s: Failed to start governor\n",
1293 __func__);
1294 return ret;
1295 }
2a998599 1296 }
27ecddc2 1297 }
1da177e4 1298
474deff7 1299 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1da177e4
LT
1300 return 0;
1301}
1302
cedb70af 1303/**
27a862e9 1304 * cpufreq_remove_dev - remove a CPU device
cedb70af
SB
1305 *
1306 * Removes the cpufreq interface for a CPU device.
cedb70af 1307 */
8a25a2fd 1308static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1309{
8a25a2fd 1310 unsigned int cpu = dev->id;
27a862e9 1311 int ret;
ec28297a
VP
1312
1313 if (cpu_is_offline(cpu))
1314 return 0;
1315
27a862e9
VK
1316 ret = __cpufreq_remove_dev_prepare(dev, sif, false);
1317
1318 if (!ret)
1319 ret = __cpufreq_remove_dev_finish(dev, sif, false);
1320
1321 return ret;
5a01f2e8
VP
1322}
1323
65f27f38 1324static void handle_update(struct work_struct *work)
1da177e4 1325{
65f27f38
DH
1326 struct cpufreq_policy *policy =
1327 container_of(work, struct cpufreq_policy, update);
1328 unsigned int cpu = policy->cpu;
2d06d8c4 1329 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1330 cpufreq_update_policy(cpu);
1331}
1332
1333/**
bb176f7d
VK
1334 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1335 * in deep trouble.
1da177e4
LT
1336 * @cpu: cpu number
1337 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1338 * @new_freq: CPU frequency the CPU actually runs at
1339 *
29464f28
DJ
1340 * We adjust to current frequency first, and need to clean up later.
1341 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1342 */
e08f5f5b
GS
1343static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1344 unsigned int new_freq)
1da177e4 1345{
b43a7ffb 1346 struct cpufreq_policy *policy;
1da177e4 1347 struct cpufreq_freqs freqs;
b43a7ffb
VK
1348 unsigned long flags;
1349
2d06d8c4 1350 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1351 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1352
1da177e4
LT
1353 freqs.old = old_freq;
1354 freqs.new = new_freq;
b43a7ffb
VK
1355
1356 read_lock_irqsave(&cpufreq_driver_lock, flags);
1357 policy = per_cpu(cpufreq_cpu_data, cpu);
1358 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1359
1360 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1361 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1da177e4
LT
1362}
1363
32ee8c3e 1364/**
4ab70df4 1365 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1366 * @cpu: CPU number
1367 *
1368 * This is the last known freq, without actually getting it from the driver.
1369 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1370 */
1371unsigned int cpufreq_quick_get(unsigned int cpu)
1372{
9e21ba8b 1373 struct cpufreq_policy *policy;
e08f5f5b 1374 unsigned int ret_freq = 0;
95235ca2 1375
1c3d85dd
RW
1376 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1377 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1378
1379 policy = cpufreq_cpu_get(cpu);
95235ca2 1380 if (policy) {
e08f5f5b 1381 ret_freq = policy->cur;
95235ca2
VP
1382 cpufreq_cpu_put(policy);
1383 }
1384
4d34a67d 1385 return ret_freq;
95235ca2
VP
1386}
1387EXPORT_SYMBOL(cpufreq_quick_get);
1388
3d737108
JB
1389/**
1390 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1391 * @cpu: CPU number
1392 *
1393 * Just return the max possible frequency for a given CPU.
1394 */
1395unsigned int cpufreq_quick_get_max(unsigned int cpu)
1396{
1397 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1398 unsigned int ret_freq = 0;
1399
1400 if (policy) {
1401 ret_freq = policy->max;
1402 cpufreq_cpu_put(policy);
1403 }
1404
1405 return ret_freq;
1406}
1407EXPORT_SYMBOL(cpufreq_quick_get_max);
1408
5a01f2e8 1409static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1410{
7a6aedfa 1411 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1412 unsigned int ret_freq = 0;
5800043b 1413
1c3d85dd 1414 if (!cpufreq_driver->get)
4d34a67d 1415 return ret_freq;
1da177e4 1416
1c3d85dd 1417 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1418
e08f5f5b 1419 if (ret_freq && policy->cur &&
1c3d85dd 1420 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e08f5f5b
GS
1421 /* verify no discrepancy between actual and
1422 saved value exists */
1423 if (unlikely(ret_freq != policy->cur)) {
1424 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1425 schedule_work(&policy->update);
1426 }
1427 }
1428
4d34a67d 1429 return ret_freq;
5a01f2e8 1430}
1da177e4 1431
5a01f2e8
VP
1432/**
1433 * cpufreq_get - get the current CPU frequency (in kHz)
1434 * @cpu: CPU number
1435 *
1436 * Get the CPU current (static) CPU frequency
1437 */
1438unsigned int cpufreq_get(unsigned int cpu)
1439{
ad7722da 1440 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
5a01f2e8 1441 unsigned int ret_freq = 0;
5a01f2e8 1442
26ca8694
VK
1443 if (cpufreq_disabled() || !cpufreq_driver)
1444 return -ENOENT;
1445
ad7722da 1446 BUG_ON(!policy);
1447
6eed9404
VK
1448 if (!down_read_trylock(&cpufreq_rwsem))
1449 return 0;
5a01f2e8 1450
ad7722da 1451 down_read(&policy->rwsem);
5a01f2e8
VP
1452
1453 ret_freq = __cpufreq_get(cpu);
1454
ad7722da 1455 up_read(&policy->rwsem);
6eed9404
VK
1456 up_read(&cpufreq_rwsem);
1457
4d34a67d 1458 return ret_freq;
1da177e4
LT
1459}
1460EXPORT_SYMBOL(cpufreq_get);
1461
8a25a2fd
KS
1462static struct subsys_interface cpufreq_interface = {
1463 .name = "cpufreq",
1464 .subsys = &cpu_subsys,
1465 .add_dev = cpufreq_add_dev,
1466 .remove_dev = cpufreq_remove_dev,
e00e56df
RW
1467};
1468
5a87182a
VK
1469void cpufreq_suspend(void)
1470{
1471 struct cpufreq_policy *policy;
1472
1473 if (!has_target())
1474 return;
1475
1476 pr_debug("%s: Suspending Governors\n", __func__);
1477
1478 list_for_each_entry(policy, &cpufreq_policy_list, policy_list)
1479 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1480 pr_err("%s: Failed to stop governor for policy: %p\n",
1481 __func__, policy);
1482
1483 cpufreq_suspended = true;
1484}
1485
1486void cpufreq_resume(void)
1487{
1488 struct cpufreq_policy *policy;
1489
1490 if (!has_target())
1491 return;
1492
1493 pr_debug("%s: Resuming Governors\n", __func__);
1494
1495 cpufreq_suspended = false;
1496
1497 list_for_each_entry(policy, &cpufreq_policy_list, policy_list)
1498 if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1499 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1500 pr_err("%s: Failed to start governor for policy: %p\n",
1501 __func__, policy);
1502}
1503
42d4dc3f 1504/**
e00e56df
RW
1505 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1506 *
1507 * This function is only executed for the boot processor. The other CPUs
1508 * have been put offline by means of CPU hotplug.
42d4dc3f 1509 */
e00e56df 1510static int cpufreq_bp_suspend(void)
42d4dc3f 1511{
e08f5f5b 1512 int ret = 0;
4bc5d341 1513
e00e56df 1514 int cpu = smp_processor_id();
3a3e9e06 1515 struct cpufreq_policy *policy;
42d4dc3f 1516
2d06d8c4 1517 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1518
e00e56df 1519 /* If there's no policy for the boot CPU, we have nothing to do. */
3a3e9e06
VK
1520 policy = cpufreq_cpu_get(cpu);
1521 if (!policy)
e00e56df 1522 return 0;
42d4dc3f 1523
1c3d85dd 1524 if (cpufreq_driver->suspend) {
3a3e9e06 1525 ret = cpufreq_driver->suspend(policy);
ce6c3997 1526 if (ret)
42d4dc3f 1527 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
3a3e9e06 1528 "step on CPU %u\n", policy->cpu);
42d4dc3f
BH
1529 }
1530
3a3e9e06 1531 cpufreq_cpu_put(policy);
c9060494 1532 return ret;
42d4dc3f
BH
1533}
1534
1da177e4 1535/**
e00e56df 1536 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1537 *
1538 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1539 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1540 * restored. It will verify that the current freq is in sync with
1541 * what we believe it to be. This is a bit later than when it
1542 * should be, but nonethteless it's better than calling
1543 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1544 *
1545 * This function is only executed for the boot CPU. The other CPUs have not
1546 * been turned on yet.
1da177e4 1547 */
e00e56df 1548static void cpufreq_bp_resume(void)
1da177e4 1549{
e08f5f5b 1550 int ret = 0;
4bc5d341 1551
e00e56df 1552 int cpu = smp_processor_id();
3a3e9e06 1553 struct cpufreq_policy *policy;
1da177e4 1554
2d06d8c4 1555 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1556
e00e56df 1557 /* If there's no policy for the boot CPU, we have nothing to do. */
3a3e9e06
VK
1558 policy = cpufreq_cpu_get(cpu);
1559 if (!policy)
e00e56df 1560 return;
1da177e4 1561
1c3d85dd 1562 if (cpufreq_driver->resume) {
3a3e9e06 1563 ret = cpufreq_driver->resume(policy);
1da177e4
LT
1564 if (ret) {
1565 printk(KERN_ERR "cpufreq: resume failed in ->resume "
3a3e9e06 1566 "step on CPU %u\n", policy->cpu);
c9060494 1567 goto fail;
1da177e4
LT
1568 }
1569 }
1570
3a3e9e06 1571 schedule_work(&policy->update);
ce6c3997 1572
c9060494 1573fail:
3a3e9e06 1574 cpufreq_cpu_put(policy);
1da177e4
LT
1575}
1576
e00e56df
RW
1577static struct syscore_ops cpufreq_syscore_ops = {
1578 .suspend = cpufreq_bp_suspend,
1579 .resume = cpufreq_bp_resume,
1da177e4
LT
1580};
1581
9d95046e
BP
1582/**
1583 * cpufreq_get_current_driver - return current driver's name
1584 *
1585 * Return the name string of the currently loaded cpufreq driver
1586 * or NULL, if none.
1587 */
1588const char *cpufreq_get_current_driver(void)
1589{
1c3d85dd
RW
1590 if (cpufreq_driver)
1591 return cpufreq_driver->name;
1592
1593 return NULL;
9d95046e
BP
1594}
1595EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1596
1597/*********************************************************************
1598 * NOTIFIER LISTS INTERFACE *
1599 *********************************************************************/
1600
1601/**
1602 * cpufreq_register_notifier - register a driver with cpufreq
1603 * @nb: notifier function to register
1604 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1605 *
32ee8c3e 1606 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1607 * are notified about clock rate changes (once before and once after
1608 * the transition), or a list of drivers that are notified about
1609 * changes in cpufreq policy.
1610 *
1611 * This function may sleep, and has the same return conditions as
e041c683 1612 * blocking_notifier_chain_register.
1da177e4
LT
1613 */
1614int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1615{
1616 int ret;
1617
d5aaffa9
DB
1618 if (cpufreq_disabled())
1619 return -EINVAL;
1620
74212ca4
CEB
1621 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1622
1da177e4
LT
1623 switch (list) {
1624 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1625 ret = srcu_notifier_chain_register(
e041c683 1626 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1627 break;
1628 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1629 ret = blocking_notifier_chain_register(
1630 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1631 break;
1632 default:
1633 ret = -EINVAL;
1634 }
1da177e4
LT
1635
1636 return ret;
1637}
1638EXPORT_SYMBOL(cpufreq_register_notifier);
1639
1da177e4
LT
1640/**
1641 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1642 * @nb: notifier block to be unregistered
bb176f7d 1643 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1644 *
1645 * Remove a driver from the CPU frequency notifier list.
1646 *
1647 * This function may sleep, and has the same return conditions as
e041c683 1648 * blocking_notifier_chain_unregister.
1da177e4
LT
1649 */
1650int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1651{
1652 int ret;
1653
d5aaffa9
DB
1654 if (cpufreq_disabled())
1655 return -EINVAL;
1656
1da177e4
LT
1657 switch (list) {
1658 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1659 ret = srcu_notifier_chain_unregister(
e041c683 1660 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1661 break;
1662 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1663 ret = blocking_notifier_chain_unregister(
1664 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1665 break;
1666 default:
1667 ret = -EINVAL;
1668 }
1da177e4
LT
1669
1670 return ret;
1671}
1672EXPORT_SYMBOL(cpufreq_unregister_notifier);
1673
1674
1675/*********************************************************************
1676 * GOVERNORS *
1677 *********************************************************************/
1678
1da177e4
LT
1679int __cpufreq_driver_target(struct cpufreq_policy *policy,
1680 unsigned int target_freq,
1681 unsigned int relation)
1682{
1683 int retval = -EINVAL;
7249924e 1684 unsigned int old_target_freq = target_freq;
c32b6b8e 1685
a7b422cd
KRW
1686 if (cpufreq_disabled())
1687 return -ENODEV;
1688
7249924e
VK
1689 /* Make sure that target_freq is within supported range */
1690 if (target_freq > policy->max)
1691 target_freq = policy->max;
1692 if (target_freq < policy->min)
1693 target_freq = policy->min;
1694
1695 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1696 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228 1697
9c0ebcf7
VK
1698 /*
1699 * This might look like a redundant call as we are checking it again
1700 * after finding index. But it is left intentionally for cases where
1701 * exactly same freq is called again and so we can save on few function
1702 * calls.
1703 */
5a1c0228
VK
1704 if (target_freq == policy->cur)
1705 return 0;
1706
1c3d85dd
RW
1707 if (cpufreq_driver->target)
1708 retval = cpufreq_driver->target(policy, target_freq, relation);
9c0ebcf7
VK
1709 else if (cpufreq_driver->target_index) {
1710 struct cpufreq_frequency_table *freq_table;
d4019f0a
VK
1711 struct cpufreq_freqs freqs;
1712 bool notify;
9c0ebcf7 1713 int index;
90d45d17 1714
9c0ebcf7
VK
1715 freq_table = cpufreq_frequency_get_table(policy->cpu);
1716 if (unlikely(!freq_table)) {
1717 pr_err("%s: Unable to find freq_table\n", __func__);
1718 goto out;
1719 }
1720
1721 retval = cpufreq_frequency_table_target(policy, freq_table,
1722 target_freq, relation, &index);
1723 if (unlikely(retval)) {
1724 pr_err("%s: Unable to find matching freq\n", __func__);
1725 goto out;
1726 }
1727
d4019f0a 1728 if (freq_table[index].frequency == policy->cur) {
9c0ebcf7 1729 retval = 0;
d4019f0a
VK
1730 goto out;
1731 }
1732
1733 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1734
1735 if (notify) {
1736 freqs.old = policy->cur;
1737 freqs.new = freq_table[index].frequency;
1738 freqs.flags = 0;
1739
1740 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1741 __func__, policy->cpu, freqs.old,
1742 freqs.new);
1743
1744 cpufreq_notify_transition(policy, &freqs,
1745 CPUFREQ_PRECHANGE);
1746 }
1747
1748 retval = cpufreq_driver->target_index(policy, index);
1749 if (retval)
1750 pr_err("%s: Failed to change cpu frequency: %d\n",
1751 __func__, retval);
1752
1753 if (notify) {
1754 /*
1755 * Notify with old freq in case we failed to change
1756 * frequency
1757 */
1758 if (retval)
1759 freqs.new = freqs.old;
1760
1761 cpufreq_notify_transition(policy, &freqs,
1762 CPUFREQ_POSTCHANGE);
1763 }
9c0ebcf7
VK
1764 }
1765
1766out:
1da177e4
LT
1767 return retval;
1768}
1769EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1770
1da177e4
LT
1771int cpufreq_driver_target(struct cpufreq_policy *policy,
1772 unsigned int target_freq,
1773 unsigned int relation)
1774{
f1829e4a 1775 int ret = -EINVAL;
1da177e4 1776
ad7722da 1777 down_write(&policy->rwsem);
1da177e4
LT
1778
1779 ret = __cpufreq_driver_target(policy, target_freq, relation);
1780
ad7722da 1781 up_write(&policy->rwsem);
1da177e4 1782
1da177e4
LT
1783 return ret;
1784}
1785EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1786
153d7f3f 1787/*
153d7f3f
AV
1788 * when "event" is CPUFREQ_GOV_LIMITS
1789 */
1da177e4 1790
e08f5f5b
GS
1791static int __cpufreq_governor(struct cpufreq_policy *policy,
1792 unsigned int event)
1da177e4 1793{
cc993cab 1794 int ret;
6afde10c
TR
1795
1796 /* Only must be defined when default governor is known to have latency
1797 restrictions, like e.g. conservative or ondemand.
1798 That this is the case is already ensured in Kconfig
1799 */
1800#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1801 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1802#else
1803 struct cpufreq_governor *gov = NULL;
1804#endif
1c256245 1805
5a87182a
VK
1806 /* Don't start any governor operations if we are entering suspend */
1807 if (cpufreq_suspended)
1808 return 0;
1809
1c256245
TR
1810 if (policy->governor->max_transition_latency &&
1811 policy->cpuinfo.transition_latency >
1812 policy->governor->max_transition_latency) {
6afde10c
TR
1813 if (!gov)
1814 return -EINVAL;
1815 else {
1816 printk(KERN_WARNING "%s governor failed, too long"
1817 " transition latency of HW, fallback"
1818 " to %s governor\n",
1819 policy->governor->name,
1820 gov->name);
1821 policy->governor = gov;
1822 }
1c256245 1823 }
1da177e4 1824
fe492f3f
VK
1825 if (event == CPUFREQ_GOV_POLICY_INIT)
1826 if (!try_module_get(policy->governor->owner))
1827 return -EINVAL;
1da177e4 1828
2d06d8c4 1829 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e08f5f5b 1830 policy->cpu, event);
95731ebb
XC
1831
1832 mutex_lock(&cpufreq_governor_lock);
56d07db2 1833 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
f73d3933
VK
1834 || (!policy->governor_enabled
1835 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
95731ebb
XC
1836 mutex_unlock(&cpufreq_governor_lock);
1837 return -EBUSY;
1838 }
1839
1840 if (event == CPUFREQ_GOV_STOP)
1841 policy->governor_enabled = false;
1842 else if (event == CPUFREQ_GOV_START)
1843 policy->governor_enabled = true;
1844
1845 mutex_unlock(&cpufreq_governor_lock);
1846
1da177e4
LT
1847 ret = policy->governor->governor(policy, event);
1848
4d5dcc42
VK
1849 if (!ret) {
1850 if (event == CPUFREQ_GOV_POLICY_INIT)
1851 policy->governor->initialized++;
1852 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1853 policy->governor->initialized--;
95731ebb
XC
1854 } else {
1855 /* Restore original values */
1856 mutex_lock(&cpufreq_governor_lock);
1857 if (event == CPUFREQ_GOV_STOP)
1858 policy->governor_enabled = true;
1859 else if (event == CPUFREQ_GOV_START)
1860 policy->governor_enabled = false;
1861 mutex_unlock(&cpufreq_governor_lock);
4d5dcc42 1862 }
b394058f 1863
fe492f3f
VK
1864 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1865 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1da177e4
LT
1866 module_put(policy->governor->owner);
1867
1868 return ret;
1869}
1870
1da177e4
LT
1871int cpufreq_register_governor(struct cpufreq_governor *governor)
1872{
3bcb09a3 1873 int err;
1da177e4
LT
1874
1875 if (!governor)
1876 return -EINVAL;
1877
a7b422cd
KRW
1878 if (cpufreq_disabled())
1879 return -ENODEV;
1880
3fc54d37 1881 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1882
b394058f 1883 governor->initialized = 0;
3bcb09a3
JF
1884 err = -EBUSY;
1885 if (__find_governor(governor->name) == NULL) {
1886 err = 0;
1887 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1888 }
1da177e4 1889
32ee8c3e 1890 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1891 return err;
1da177e4
LT
1892}
1893EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1894
1da177e4
LT
1895void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1896{
90e41bac
PB
1897#ifdef CONFIG_HOTPLUG_CPU
1898 int cpu;
1899#endif
1900
1da177e4
LT
1901 if (!governor)
1902 return;
1903
a7b422cd
KRW
1904 if (cpufreq_disabled())
1905 return;
1906
90e41bac
PB
1907#ifdef CONFIG_HOTPLUG_CPU
1908 for_each_present_cpu(cpu) {
1909 if (cpu_online(cpu))
1910 continue;
1911 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1912 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1913 }
1914#endif
1915
3fc54d37 1916 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1917 list_del(&governor->governor_list);
3fc54d37 1918 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1919 return;
1920}
1921EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1922
1923
1da177e4
LT
1924/*********************************************************************
1925 * POLICY INTERFACE *
1926 *********************************************************************/
1927
1928/**
1929 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1930 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1931 * is written
1da177e4
LT
1932 *
1933 * Reads the current cpufreq policy.
1934 */
1935int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1936{
1937 struct cpufreq_policy *cpu_policy;
1938 if (!policy)
1939 return -EINVAL;
1940
1941 cpu_policy = cpufreq_cpu_get(cpu);
1942 if (!cpu_policy)
1943 return -EINVAL;
1944
d5b73cd8 1945 memcpy(policy, cpu_policy, sizeof(*policy));
1da177e4
LT
1946
1947 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1948 return 0;
1949}
1950EXPORT_SYMBOL(cpufreq_get_policy);
1951
/*
 * cpufreq_set_policy - apply a validated policy to a CPU
 * policy : current policy.
 * new_policy: policy to be set.
 *
 * Verifies @new_policy against the driver and the policy notifier chain,
 * then either programs the hardware directly (->setpolicy drivers) or
 * performs a governor switch / limits update.  The caller holds
 * policy->rwsem for writing (this function temporarily drops it around
 * GOV_POLICY_EXIT, see below).  Returns 0 on success or a negative errno.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			struct cpufreq_policy *new_policy)
{
	/* failed stays 1 unless the new governor both inits and starts */
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
		new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/* reject ranges that do not even overlap the current limits */
	if (new_policy->min > policy->max || new_policy->max < policy->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		/* driver manages frequency selection itself */
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(new_policy);
	} else {
		if (new_policy->governor != policy->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = policy->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (policy->governor) {
				__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
				/*
				 * NOTE(review): rwsem is dropped around
				 * GOV_POLICY_EXIT — presumably the governor's
				 * exit path can re-enter code that takes this
				 * rwsem (e.g. via sysfs teardown); confirm
				 * before reordering.
				 */
				up_write(&policy->rwsem);
				__cpufreq_governor(policy,
						CPUFREQ_GOV_POLICY_EXIT);
				down_write(&policy->rwsem);
			}

			/* start new governor */
			policy->governor = new_policy->governor;
			if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					/* START failed: unwind the INIT, with
					 * the same rwsem drop as above */
					up_write(&policy->rwsem);
					__cpufreq_governor(policy,
							CPUFREQ_GOV_POLICY_EXIT);
					down_write(&policy->rwsem);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							policy->governor->name);
				if (old_gov) {
					policy->governor = old_gov;
					__cpufreq_governor(policy,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(policy,
							CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
2058
1da177e4
LT
2059/**
2060 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2061 * @cpu: CPU which shall be re-evaluated
2062 *
25985edc 2063 * Useful for policy notifiers which have different necessities
1da177e4
LT
2064 * at different times.
2065 */
2066int cpufreq_update_policy(unsigned int cpu)
2067{
3a3e9e06
VK
2068 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2069 struct cpufreq_policy new_policy;
f1829e4a 2070 int ret;
1da177e4 2071
3a3e9e06 2072 if (!policy) {
f1829e4a
JL
2073 ret = -ENODEV;
2074 goto no_policy;
2075 }
1da177e4 2076
ad7722da 2077 down_write(&policy->rwsem);
1da177e4 2078
2d06d8c4 2079 pr_debug("updating policy for CPU %u\n", cpu);
d5b73cd8 2080 memcpy(&new_policy, policy, sizeof(*policy));
3a3e9e06
VK
2081 new_policy.min = policy->user_policy.min;
2082 new_policy.max = policy->user_policy.max;
2083 new_policy.policy = policy->user_policy.policy;
2084 new_policy.governor = policy->user_policy.governor;
1da177e4 2085
bb176f7d
VK
2086 /*
2087 * BIOS might change freq behind our back
2088 * -> ask driver for current freq and notify governors about a change
2089 */
1c3d85dd 2090 if (cpufreq_driver->get) {
3a3e9e06
VK
2091 new_policy.cur = cpufreq_driver->get(cpu);
2092 if (!policy->cur) {
2d06d8c4 2093 pr_debug("Driver did not initialize current freq");
3a3e9e06 2094 policy->cur = new_policy.cur;
a85f7bd3 2095 } else {
9c0ebcf7 2096 if (policy->cur != new_policy.cur && has_target())
3a3e9e06
VK
2097 cpufreq_out_of_sync(cpu, policy->cur,
2098 new_policy.cur);
a85f7bd3 2099 }
0961dd0d
TR
2100 }
2101
037ce839 2102 ret = cpufreq_set_policy(policy, &new_policy);
1da177e4 2103
ad7722da 2104 up_write(&policy->rwsem);
5a01f2e8 2105
3a3e9e06 2106 cpufreq_cpu_put(policy);
f1829e4a 2107no_policy:
1da177e4
LT
2108 return ret;
2109}
2110EXPORT_SYMBOL(cpufreq_update_policy);
2111
2760984f 2112static int cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
2113 unsigned long action, void *hcpu)
2114{
2115 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 2116 struct device *dev;
5302c3fb 2117 bool frozen = false;
c32b6b8e 2118
8a25a2fd
KS
2119 dev = get_cpu_device(cpu);
2120 if (dev) {
5302c3fb
SB
2121
2122 if (action & CPU_TASKS_FROZEN)
2123 frozen = true;
2124
2125 switch (action & ~CPU_TASKS_FROZEN) {
c32b6b8e 2126 case CPU_ONLINE:
5302c3fb 2127 __cpufreq_add_dev(dev, NULL, frozen);
23d32899 2128 cpufreq_update_policy(cpu);
c32b6b8e 2129 break;
5302c3fb 2130
c32b6b8e 2131 case CPU_DOWN_PREPARE:
cedb70af 2132 __cpufreq_remove_dev_prepare(dev, NULL, frozen);
1aee40ac
SB
2133 break;
2134
2135 case CPU_POST_DEAD:
cedb70af 2136 __cpufreq_remove_dev_finish(dev, NULL, frozen);
c32b6b8e 2137 break;
5302c3fb 2138
5a01f2e8 2139 case CPU_DOWN_FAILED:
5302c3fb 2140 __cpufreq_add_dev(dev, NULL, frozen);
c32b6b8e
AR
2141 break;
2142 }
2143 }
2144 return NOTIFY_OK;
2145}
2146
/*
 * Hotplug notifier block, registered from cpufreq_register_driver().
 * NOTE(review): __refdata presumably silences a section-mismatch warning
 * for the callback reference — confirm against the registering commit.
 */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
2150
2151/*********************************************************************
2152 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2153 *********************************************************************/
2154
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime), -EINVAL on a malformed
 * driver, and -ENODEV when cpufreq is disabled or a non-sticky driver
 * failed to initialize any CPU.
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* a driver needs verify + init and exactly one way to set freqs */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/*
	 * NOTE(review): CPUFREQ_CONST_LOOPS is forced for ->setpolicy
	 * drivers — presumably because the core cannot track frequency
	 * changes those drivers make autonomously; confirm.
	 */
	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* cpufreq_driver_lock guards the global driver pointer */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* triggers ->add_dev (and thus driver ->init) for each present CPU */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2227
1da177e4
LT
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	/* only the driver that registered itself may unregister */
	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* tears down per-CPU policies and stops hotplug callbacks first */
	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	/*
	 * NOTE(review): cpufreq_rwsem is taken outside cpufreq_driver_lock
	 * before clearing the driver pointer — presumably to exclude
	 * concurrent policy add/remove; keep this lock order.
	 */
	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2259
/*
 * Core initialization: create the global cpufreq kobject and register
 * the syscore (suspend/resume) hooks.  Runs at core_initcall time so it
 * completes before any cpufreq driver can register.
 */
static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	/* later code assumes this kobject exists, hence BUG_ON rather
	 * than an error return */
	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);
This page took 1.081442 seconds and 5 git commands to generate.