cpufreq: Give consistent names to cpufreq_policy objects
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
bb176f7d 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
1da177e4 7 *
c32b6b8e 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 9 * Added handling for CPU hotplug
8ff69732
DJ
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 12 *
1da177e4
LT
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
1da177e4
LT
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
5ff0a268 20#include <linux/cpu.h>
1da177e4
LT
21#include <linux/cpufreq.h>
22#include <linux/delay.h>
1da177e4 23#include <linux/device.h>
5ff0a268
VK
24#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
3fc54d37 27#include <linux/mutex.h>
5ff0a268 28#include <linux/slab.h>
e00e56df 29#include <linux/syscore_ops.h>
5ff0a268 30#include <linux/tick.h>
6f4f2723
TR
31#include <trace/events/power.h>
32
1da177e4 33/**
cd878479 34 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
35 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
37 */
1c3d85dd 38static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 39static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
8414809c 40static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
bb176f7d
VK
41static DEFINE_RWLOCK(cpufreq_driver_lock);
42static DEFINE_MUTEX(cpufreq_governor_lock);
43
084f3493
TR
44#ifdef CONFIG_HOTPLUG_CPU
45/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 47#endif
1da177e4 48
5a01f2e8
VP
49/*
50 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
51 * all cpufreq/hotplug/workqueue/etc related lock issues.
52 *
53 * The rules for this semaphore:
54 * - Any routine that wants to read from the policy structure will
55 * do a down_read on this semaphore.
56 * - Any routine that will write to the policy structure and/or may take away
57 * the policy altogether (eg. CPU hotplug), will hold this lock in write
58 * mode before doing so.
59 *
60 * Additional rules:
5a01f2e8
VP
61 * - Governor routines that can be called in cpufreq hotplug path should not
62 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
63 * - Lock should not be held across
64 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 65 */
f1625066 66static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
67static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
68
69#define lock_policy_rwsem(mode, cpu) \
fa1d8af4 70static int lock_policy_rwsem_##mode(int cpu) \
5a01f2e8 71{ \
f1625066 72 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
5a01f2e8
VP
73 BUG_ON(policy_cpu == -1); \
74 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8
VP
75 \
76 return 0; \
77}
78
79lock_policy_rwsem(read, cpu);
5a01f2e8 80lock_policy_rwsem(write, cpu);
5a01f2e8 81
fa1d8af4
VK
82#define unlock_policy_rwsem(mode, cpu) \
83static void unlock_policy_rwsem_##mode(int cpu) \
84{ \
85 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
86 BUG_ON(policy_cpu == -1); \
87 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8 88}
5a01f2e8 89
fa1d8af4
VK
90unlock_policy_rwsem(read, cpu);
91unlock_policy_rwsem(write, cpu);
5a01f2e8 92
1da177e4 93/* internal prototypes */
29464f28
DJ
94static int __cpufreq_governor(struct cpufreq_policy *policy,
95 unsigned int event);
5a01f2e8 96static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 97static void handle_update(struct work_struct *work);
1da177e4
LT
98
99/**
32ee8c3e
DJ
100 * Two notifier lists: the "policy" list is involved in the
101 * validation process for a new CPU frequency policy; the
1da177e4
LT
102 * "transition" list for kernel code that needs to handle
103 * changes to devices when the CPU clock speed changes.
104 * The mutex locks both lists.
105 */
e041c683 106static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 107static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 108
74212ca4 109static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
110static int __init init_cpufreq_transition_notifier_list(void)
111{
112 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 113 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
114 return 0;
115}
b3438f82 116pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 117
a7b422cd 118static int off __read_mostly;
da584455 119static int cpufreq_disabled(void)
a7b422cd
KRW
120{
121 return off;
122}
123void disable_cpufreq(void)
124{
125 off = 1;
126}
1da177e4 127static LIST_HEAD(cpufreq_governor_list);
29464f28 128static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 129
4d5dcc42
VK
130bool have_governor_per_policy(void)
131{
1c3d85dd 132 return cpufreq_driver->have_governor_per_policy;
4d5dcc42 133}
3f869d6d 134EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 135
944e9a03
VK
136struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
137{
138 if (have_governor_per_policy())
139 return &policy->kobj;
140 else
141 return cpufreq_global_kobject;
142}
143EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
144
72a4ce34
VK
145static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
146{
147 u64 idle_time;
148 u64 cur_wall_time;
149 u64 busy_time;
150
151 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
152
153 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
154 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
155 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
156 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
157 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
158 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
159
160 idle_time = cur_wall_time - busy_time;
161 if (wall)
162 *wall = cputime_to_usecs(cur_wall_time);
163
164 return cputime_to_usecs(idle_time);
165}
166
167u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
168{
169 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
170
171 if (idle_time == -1ULL)
172 return get_cpu_idle_time_jiffy(cpu, wall);
173 else if (!io_busy)
174 idle_time += get_cpu_iowait_time_us(cpu, wall);
175
176 return idle_time;
177}
178EXPORT_SYMBOL_GPL(get_cpu_idle_time);
179
a9144436 180static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
1da177e4 181{
3a3e9e06 182 struct cpufreq_policy *policy;
1da177e4
LT
183 unsigned long flags;
184
7a6aedfa 185 if (cpu >= nr_cpu_ids)
1da177e4
LT
186 goto err_out;
187
188 /* get the cpufreq driver */
1c3d85dd 189 read_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4 190
1c3d85dd 191 if (!cpufreq_driver)
1da177e4
LT
192 goto err_out_unlock;
193
1c3d85dd 194 if (!try_module_get(cpufreq_driver->owner))
1da177e4
LT
195 goto err_out_unlock;
196
1da177e4 197 /* get the CPU */
3a3e9e06 198 policy = per_cpu(cpufreq_cpu_data, cpu);
1da177e4 199
3a3e9e06 200 if (!policy)
1da177e4
LT
201 goto err_out_put_module;
202
3a3e9e06 203 if (!sysfs && !kobject_get(&policy->kobj))
1da177e4
LT
204 goto err_out_put_module;
205
0d1857a1 206 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
3a3e9e06 207 return policy;
1da177e4 208
7d5e350f 209err_out_put_module:
1c3d85dd 210 module_put(cpufreq_driver->owner);
5800043b 211err_out_unlock:
1c3d85dd 212 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
7d5e350f 213err_out:
1da177e4
LT
214 return NULL;
215}
a9144436
SB
216
217struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
218{
d5aaffa9
DB
219 if (cpufreq_disabled())
220 return NULL;
221
a9144436
SB
222 return __cpufreq_cpu_get(cpu, false);
223}
1da177e4
LT
224EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
225
a9144436
SB
226static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
227{
228 return __cpufreq_cpu_get(cpu, true);
229}
230
3a3e9e06 231static void __cpufreq_cpu_put(struct cpufreq_policy *policy, bool sysfs)
a9144436
SB
232{
233 if (!sysfs)
3a3e9e06 234 kobject_put(&policy->kobj);
1c3d85dd 235 module_put(cpufreq_driver->owner);
a9144436 236}
7d5e350f 237
3a3e9e06 238void cpufreq_cpu_put(struct cpufreq_policy *policy)
1da177e4 239{
d5aaffa9
DB
240 if (cpufreq_disabled())
241 return;
242
3a3e9e06 243 __cpufreq_cpu_put(policy, false);
1da177e4
LT
244}
245EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
246
3a3e9e06 247static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *policy)
a9144436 248{
3a3e9e06 249 __cpufreq_cpu_put(policy, true);
a9144436 250}
1da177e4 251
1da177e4
LT
252/*********************************************************************
253 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
254 *********************************************************************/
255
256/**
257 * adjust_jiffies - adjust the system "loops_per_jiffy"
258 *
259 * This function alters the system "loops_per_jiffy" for the clock
260 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 261 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
262 * per-CPU loops_per_jiffy value wherever possible.
263 */
264#ifndef CONFIG_SMP
265static unsigned long l_p_j_ref;
bb176f7d 266static unsigned int l_p_j_ref_freq;
1da177e4 267
858119e1 268static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4
LT
269{
270 if (ci->flags & CPUFREQ_CONST_LOOPS)
271 return;
272
273 if (!l_p_j_ref_freq) {
274 l_p_j_ref = loops_per_jiffy;
275 l_p_j_ref_freq = ci->old;
2d06d8c4 276 pr_debug("saving %lu as reference value for loops_per_jiffy; "
e08f5f5b 277 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
1da177e4 278 }
bb176f7d 279 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
42d4dc3f 280 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
e08f5f5b
GS
281 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
282 ci->new);
2d06d8c4 283 pr_debug("scaling loops_per_jiffy to %lu "
e08f5f5b 284 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
1da177e4
LT
285 }
286}
287#else
e08f5f5b
GS
288static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
289{
290 return;
291}
1da177e4
LT
292#endif
293
0956df9c 294static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
b43a7ffb 295 struct cpufreq_freqs *freqs, unsigned int state)
1da177e4
LT
296{
297 BUG_ON(irqs_disabled());
298
d5aaffa9
DB
299 if (cpufreq_disabled())
300 return;
301
1c3d85dd 302 freqs->flags = cpufreq_driver->flags;
2d06d8c4 303 pr_debug("notification %u of frequency transition to %u kHz\n",
e4472cb3 304 state, freqs->new);
1da177e4 305
1da177e4 306 switch (state) {
e4472cb3 307
1da177e4 308 case CPUFREQ_PRECHANGE:
266c13d7
VK
309 if (WARN(policy->transition_ongoing ==
310 cpumask_weight(policy->cpus),
7c30ed53
VK
311 "In middle of another frequency transition\n"))
312 return;
313
266c13d7 314 policy->transition_ongoing++;
7c30ed53 315
32ee8c3e 316 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
317 * which is not equal to what the cpufreq core thinks is
318 * "old frequency".
1da177e4 319 */
1c3d85dd 320 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
321 if ((policy) && (policy->cpu == freqs->cpu) &&
322 (policy->cur) && (policy->cur != freqs->old)) {
2d06d8c4 323 pr_debug("Warning: CPU frequency is"
e4472cb3
DJ
324 " %u, cpufreq assumed %u kHz.\n",
325 freqs->old, policy->cur);
326 freqs->old = policy->cur;
1da177e4
LT
327 }
328 }
b4dfdbb3 329 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 330 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
331 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
332 break;
e4472cb3 333
1da177e4 334 case CPUFREQ_POSTCHANGE:
7c30ed53
VK
335 if (WARN(!policy->transition_ongoing,
336 "No frequency transition in progress\n"))
337 return;
338
266c13d7 339 policy->transition_ongoing--;
7c30ed53 340
1da177e4 341 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
2d06d8c4 342 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
6f4f2723 343 (unsigned long)freqs->cpu);
25e41933 344 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 345 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 346 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
347 if (likely(policy) && likely(policy->cpu == freqs->cpu))
348 policy->cur = freqs->new;
1da177e4
LT
349 break;
350 }
1da177e4 351}
bb176f7d 352
b43a7ffb
VK
353/**
354 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
355 * on frequency transition.
356 *
357 * This function calls the transition notifiers and the "adjust_jiffies"
358 * function. It is called twice on all CPU frequency changes that have
359 * external effects.
360 */
361void cpufreq_notify_transition(struct cpufreq_policy *policy,
362 struct cpufreq_freqs *freqs, unsigned int state)
363{
364 for_each_cpu(freqs->cpu, policy->cpus)
365 __cpufreq_notify_transition(policy, freqs, state);
366}
1da177e4
LT
367EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
368
369
1da177e4
LT
370/*********************************************************************
371 * SYSFS INTERFACE *
372 *********************************************************************/
373
3bcb09a3
JF
374static struct cpufreq_governor *__find_governor(const char *str_governor)
375{
376 struct cpufreq_governor *t;
377
378 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 379 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
380 return t;
381
382 return NULL;
383}
384
1da177e4
LT
385/**
386 * cpufreq_parse_governor - parse a governor string
387 */
905d77cd 388static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
389 struct cpufreq_governor **governor)
390{
3bcb09a3 391 int err = -EINVAL;
1c3d85dd
RW
392
393 if (!cpufreq_driver)
3bcb09a3
JF
394 goto out;
395
1c3d85dd 396 if (cpufreq_driver->setpolicy) {
1da177e4
LT
397 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
398 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 399 err = 0;
e08f5f5b
GS
400 } else if (!strnicmp(str_governor, "powersave",
401 CPUFREQ_NAME_LEN)) {
1da177e4 402 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 403 err = 0;
1da177e4 404 }
1c3d85dd 405 } else if (cpufreq_driver->target) {
1da177e4 406 struct cpufreq_governor *t;
3bcb09a3 407
3fc54d37 408 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
409
410 t = __find_governor(str_governor);
411
ea714970 412 if (t == NULL) {
1a8e1463 413 int ret;
ea714970 414
1a8e1463
KC
415 mutex_unlock(&cpufreq_governor_mutex);
416 ret = request_module("cpufreq_%s", str_governor);
417 mutex_lock(&cpufreq_governor_mutex);
ea714970 418
1a8e1463
KC
419 if (ret == 0)
420 t = __find_governor(str_governor);
ea714970
JF
421 }
422
3bcb09a3
JF
423 if (t != NULL) {
424 *governor = t;
425 err = 0;
1da177e4 426 }
3bcb09a3 427
3fc54d37 428 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 429 }
29464f28 430out:
3bcb09a3 431 return err;
1da177e4 432}
1da177e4 433
1da177e4 434/**
e08f5f5b
GS
435 * cpufreq_per_cpu_attr_read() / show_##file_name() -
436 * print out cpufreq information
1da177e4
LT
437 *
438 * Write out information from cpufreq_driver->policy[cpu]; object must be
439 * "unsigned int".
440 */
441
32ee8c3e
DJ
442#define show_one(file_name, object) \
443static ssize_t show_##file_name \
905d77cd 444(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 445{ \
29464f28 446 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
447}
448
449show_one(cpuinfo_min_freq, cpuinfo.min_freq);
450show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 451show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
452show_one(scaling_min_freq, min);
453show_one(scaling_max_freq, max);
454show_one(scaling_cur_freq, cur);
455
3a3e9e06
VK
456static int __cpufreq_set_policy(struct cpufreq_policy *policy,
457 struct cpufreq_policy *new_policy);
7970e08b 458
1da177e4
LT
459/**
460 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
461 */
462#define store_one(file_name, object) \
463static ssize_t store_##file_name \
905d77cd 464(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 465{ \
f55c9c26 466 unsigned int ret; \
1da177e4
LT
467 struct cpufreq_policy new_policy; \
468 \
469 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
470 if (ret) \
471 return -EINVAL; \
472 \
29464f28 473 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
474 if (ret != 1) \
475 return -EINVAL; \
476 \
7970e08b
TR
477 ret = __cpufreq_set_policy(policy, &new_policy); \
478 policy->user_policy.object = policy->object; \
1da177e4
LT
479 \
480 return ret ? ret : count; \
481}
482
29464f28
DJ
483store_one(scaling_min_freq, min);
484store_one(scaling_max_freq, max);
1da177e4
LT
485
486/**
487 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
488 */
905d77cd
DJ
489static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
490 char *buf)
1da177e4 491{
5a01f2e8 492 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
493 if (!cur_freq)
494 return sprintf(buf, "<unknown>");
495 return sprintf(buf, "%u\n", cur_freq);
496}
497
1da177e4
LT
498/**
499 * show_scaling_governor - show the current policy for the specified CPU
500 */
905d77cd 501static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 502{
29464f28 503 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
504 return sprintf(buf, "powersave\n");
505 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
506 return sprintf(buf, "performance\n");
507 else if (policy->governor)
4b972f0b 508 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 509 policy->governor->name);
1da177e4
LT
510 return -EINVAL;
511}
512
1da177e4
LT
513/**
514 * store_scaling_governor - store policy for the specified CPU
515 */
905d77cd
DJ
516static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
517 const char *buf, size_t count)
1da177e4 518{
f55c9c26 519 unsigned int ret;
1da177e4
LT
520 char str_governor[16];
521 struct cpufreq_policy new_policy;
522
523 ret = cpufreq_get_policy(&new_policy, policy->cpu);
524 if (ret)
525 return ret;
526
29464f28 527 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
528 if (ret != 1)
529 return -EINVAL;
530
e08f5f5b
GS
531 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
532 &new_policy.governor))
1da177e4
LT
533 return -EINVAL;
534
bb176f7d
VK
535 /*
536 * Do not use cpufreq_set_policy here or the user_policy.max
537 * will be wrongly overridden
538 */
7970e08b
TR
539 ret = __cpufreq_set_policy(policy, &new_policy);
540
541 policy->user_policy.policy = policy->policy;
542 policy->user_policy.governor = policy->governor;
7970e08b 543
e08f5f5b
GS
544 if (ret)
545 return ret;
546 else
547 return count;
1da177e4
LT
548}
549
550/**
551 * show_scaling_driver - show the cpufreq driver currently loaded
552 */
905d77cd 553static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 554{
1c3d85dd 555 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
1da177e4
LT
556}
557
558/**
559 * show_scaling_available_governors - show the available CPUfreq governors
560 */
905d77cd
DJ
561static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
562 char *buf)
1da177e4
LT
563{
564 ssize_t i = 0;
565 struct cpufreq_governor *t;
566
1c3d85dd 567 if (!cpufreq_driver->target) {
1da177e4
LT
568 i += sprintf(buf, "performance powersave");
569 goto out;
570 }
571
572 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
573 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
574 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 575 goto out;
4b972f0b 576 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 577 }
7d5e350f 578out:
1da177e4
LT
579 i += sprintf(&buf[i], "\n");
580 return i;
581}
e8628dd0 582
f4fd3797 583ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
584{
585 ssize_t i = 0;
586 unsigned int cpu;
587
835481d9 588 for_each_cpu(cpu, mask) {
1da177e4
LT
589 if (i)
590 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
591 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
592 if (i >= (PAGE_SIZE - 5))
29464f28 593 break;
1da177e4
LT
594 }
595 i += sprintf(&buf[i], "\n");
596 return i;
597}
f4fd3797 598EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
1da177e4 599
e8628dd0
DW
600/**
601 * show_related_cpus - show the CPUs affected by each transition even if
602 * hw coordination is in use
603 */
604static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
605{
f4fd3797 606 return cpufreq_show_cpus(policy->related_cpus, buf);
e8628dd0
DW
607}
608
609/**
610 * show_affected_cpus - show the CPUs affected by each transition
611 */
612static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
613{
f4fd3797 614 return cpufreq_show_cpus(policy->cpus, buf);
e8628dd0
DW
615}
616
9e76988e 617static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 618 const char *buf, size_t count)
9e76988e
VP
619{
620 unsigned int freq = 0;
621 unsigned int ret;
622
879000f9 623 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
624 return -EINVAL;
625
626 ret = sscanf(buf, "%u", &freq);
627 if (ret != 1)
628 return -EINVAL;
629
630 policy->governor->store_setspeed(policy, freq);
631
632 return count;
633}
634
635static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
636{
879000f9 637 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
638 return sprintf(buf, "<unsupported>\n");
639
640 return policy->governor->show_setspeed(policy, buf);
641}
1da177e4 642
e2f74f35 643/**
8bf1ac72 644 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
645 */
646static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
647{
648 unsigned int limit;
649 int ret;
1c3d85dd
RW
650 if (cpufreq_driver->bios_limit) {
651 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
e2f74f35
TR
652 if (!ret)
653 return sprintf(buf, "%u\n", limit);
654 }
655 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
656}
657
6dad2a29
BP
658cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
659cpufreq_freq_attr_ro(cpuinfo_min_freq);
660cpufreq_freq_attr_ro(cpuinfo_max_freq);
661cpufreq_freq_attr_ro(cpuinfo_transition_latency);
662cpufreq_freq_attr_ro(scaling_available_governors);
663cpufreq_freq_attr_ro(scaling_driver);
664cpufreq_freq_attr_ro(scaling_cur_freq);
665cpufreq_freq_attr_ro(bios_limit);
666cpufreq_freq_attr_ro(related_cpus);
667cpufreq_freq_attr_ro(affected_cpus);
668cpufreq_freq_attr_rw(scaling_min_freq);
669cpufreq_freq_attr_rw(scaling_max_freq);
670cpufreq_freq_attr_rw(scaling_governor);
671cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 672
905d77cd 673static struct attribute *default_attrs[] = {
1da177e4
LT
674 &cpuinfo_min_freq.attr,
675 &cpuinfo_max_freq.attr,
ed129784 676 &cpuinfo_transition_latency.attr,
1da177e4
LT
677 &scaling_min_freq.attr,
678 &scaling_max_freq.attr,
679 &affected_cpus.attr,
e8628dd0 680 &related_cpus.attr,
1da177e4
LT
681 &scaling_governor.attr,
682 &scaling_driver.attr,
683 &scaling_available_governors.attr,
9e76988e 684 &scaling_setspeed.attr,
1da177e4
LT
685 NULL
686};
687
29464f28
DJ
688#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
689#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 690
29464f28 691static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 692{
905d77cd
DJ
693 struct cpufreq_policy *policy = to_policy(kobj);
694 struct freq_attr *fattr = to_attr(attr);
0db4a8a9 695 ssize_t ret = -EINVAL;
a9144436 696 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 697 if (!policy)
0db4a8a9 698 goto no_policy;
5a01f2e8
VP
699
700 if (lock_policy_rwsem_read(policy->cpu) < 0)
0db4a8a9 701 goto fail;
5a01f2e8 702
e08f5f5b
GS
703 if (fattr->show)
704 ret = fattr->show(policy, buf);
705 else
706 ret = -EIO;
707
5a01f2e8 708 unlock_policy_rwsem_read(policy->cpu);
0db4a8a9 709fail:
a9144436 710 cpufreq_cpu_put_sysfs(policy);
0db4a8a9 711no_policy:
1da177e4
LT
712 return ret;
713}
714
905d77cd
DJ
715static ssize_t store(struct kobject *kobj, struct attribute *attr,
716 const char *buf, size_t count)
1da177e4 717{
905d77cd
DJ
718 struct cpufreq_policy *policy = to_policy(kobj);
719 struct freq_attr *fattr = to_attr(attr);
a07530b4 720 ssize_t ret = -EINVAL;
a9144436 721 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 722 if (!policy)
a07530b4 723 goto no_policy;
5a01f2e8
VP
724
725 if (lock_policy_rwsem_write(policy->cpu) < 0)
a07530b4 726 goto fail;
5a01f2e8 727
e08f5f5b
GS
728 if (fattr->store)
729 ret = fattr->store(policy, buf, count);
730 else
731 ret = -EIO;
732
5a01f2e8 733 unlock_policy_rwsem_write(policy->cpu);
a07530b4 734fail:
a9144436 735 cpufreq_cpu_put_sysfs(policy);
a07530b4 736no_policy:
1da177e4
LT
737 return ret;
738}
739
905d77cd 740static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 741{
905d77cd 742 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 743 pr_debug("last reference is dropped\n");
1da177e4
LT
744 complete(&policy->kobj_unregister);
745}
746
52cf25d0 747static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
748 .show = show,
749 .store = store,
750};
751
752static struct kobj_type ktype_cpufreq = {
753 .sysfs_ops = &sysfs_ops,
754 .default_attrs = default_attrs,
755 .release = cpufreq_sysfs_release,
756};
757
2361be23
VK
758struct kobject *cpufreq_global_kobject;
759EXPORT_SYMBOL(cpufreq_global_kobject);
760
761static int cpufreq_global_kobject_usage;
762
763int cpufreq_get_global_kobject(void)
764{
765 if (!cpufreq_global_kobject_usage++)
766 return kobject_add(cpufreq_global_kobject,
767 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
768
769 return 0;
770}
771EXPORT_SYMBOL(cpufreq_get_global_kobject);
772
773void cpufreq_put_global_kobject(void)
774{
775 if (!--cpufreq_global_kobject_usage)
776 kobject_del(cpufreq_global_kobject);
777}
778EXPORT_SYMBOL(cpufreq_put_global_kobject);
779
780int cpufreq_sysfs_create_file(const struct attribute *attr)
781{
782 int ret = cpufreq_get_global_kobject();
783
784 if (!ret) {
785 ret = sysfs_create_file(cpufreq_global_kobject, attr);
786 if (ret)
787 cpufreq_put_global_kobject();
788 }
789
790 return ret;
791}
792EXPORT_SYMBOL(cpufreq_sysfs_create_file);
793
794void cpufreq_sysfs_remove_file(const struct attribute *attr)
795{
796 sysfs_remove_file(cpufreq_global_kobject, attr);
797 cpufreq_put_global_kobject();
798}
799EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
800
19d6f7ec 801/* symlink affected CPUs */
308b60e7 802static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
19d6f7ec
DJ
803{
804 unsigned int j;
805 int ret = 0;
806
807 for_each_cpu(j, policy->cpus) {
8a25a2fd 808 struct device *cpu_dev;
19d6f7ec 809
308b60e7 810 if (j == policy->cpu)
19d6f7ec 811 continue;
19d6f7ec 812
e8fdde10 813 pr_debug("Adding link for CPU: %u\n", j);
8a25a2fd
KS
814 cpu_dev = get_cpu_device(j);
815 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
19d6f7ec 816 "cpufreq");
71c3461e
RW
817 if (ret)
818 break;
19d6f7ec
DJ
819 }
820 return ret;
821}
822
308b60e7 823static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
8a25a2fd 824 struct device *dev)
909a694e
DJ
825{
826 struct freq_attr **drv_attr;
909a694e 827 int ret = 0;
909a694e
DJ
828
829 /* prepare interface data */
830 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
8a25a2fd 831 &dev->kobj, "cpufreq");
909a694e
DJ
832 if (ret)
833 return ret;
834
835 /* set up files for this cpu device */
1c3d85dd 836 drv_attr = cpufreq_driver->attr;
909a694e
DJ
837 while ((drv_attr) && (*drv_attr)) {
838 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
839 if (ret)
1c3d85dd 840 goto err_out_kobj_put;
909a694e
DJ
841 drv_attr++;
842 }
1c3d85dd 843 if (cpufreq_driver->get) {
909a694e
DJ
844 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
845 if (ret)
1c3d85dd 846 goto err_out_kobj_put;
909a694e 847 }
1c3d85dd 848 if (cpufreq_driver->target) {
909a694e
DJ
849 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
850 if (ret)
1c3d85dd 851 goto err_out_kobj_put;
909a694e 852 }
1c3d85dd 853 if (cpufreq_driver->bios_limit) {
e2f74f35
TR
854 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
855 if (ret)
1c3d85dd 856 goto err_out_kobj_put;
e2f74f35 857 }
909a694e 858
308b60e7 859 ret = cpufreq_add_dev_symlink(policy);
ecf7e461
DJ
860 if (ret)
861 goto err_out_kobj_put;
862
e18f1682
SB
863 return ret;
864
865err_out_kobj_put:
866 kobject_put(&policy->kobj);
867 wait_for_completion(&policy->kobj_unregister);
868 return ret;
869}
870
871static void cpufreq_init_policy(struct cpufreq_policy *policy)
872{
873 struct cpufreq_policy new_policy;
874 int ret = 0;
875
ecf7e461
DJ
876 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
877 /* assure that the starting sequence is run in __cpufreq_set_policy */
878 policy->governor = NULL;
879
880 /* set default policy */
881 ret = __cpufreq_set_policy(policy, &new_policy);
882 policy->user_policy.policy = policy->policy;
883 policy->user_policy.governor = policy->governor;
884
885 if (ret) {
2d06d8c4 886 pr_debug("setting policy failed\n");
1c3d85dd
RW
887 if (cpufreq_driver->exit)
888 cpufreq_driver->exit(policy);
ecf7e461 889 }
909a694e
DJ
890}
891
#ifdef CONFIG_HOTPLUG_CPU
/*
 * cpufreq_add_policy_cpu - attach hot-added @cpu to an existing @policy.
 *
 * The governor is stopped around the per-CPU bookkeeping update (done under
 * the policy write-sem and the driver rwlock) and restarted afterwards.
 * @frozen: light-weight (suspend/resume) init; sysfs links are left alone.
 */
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev,
				  bool frozen)
{
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	if (has_target)
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(policy->cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(policy->cpu);

	if (has_target) {
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

	/* Don't touch sysfs links during light-weight init */
	if (!frozen)
		ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");

	return ret;
}
#endif
1da177e4 926
8414809c
SB
927static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
928{
929 struct cpufreq_policy *policy;
930 unsigned long flags;
931
932 write_lock_irqsave(&cpufreq_driver_lock, flags);
933
934 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
935
936 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
937
938 return policy;
939}
940
e9698cc5
SB
941static struct cpufreq_policy *cpufreq_policy_alloc(void)
942{
943 struct cpufreq_policy *policy;
944
945 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
946 if (!policy)
947 return NULL;
948
949 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
950 goto err_free_policy;
951
952 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
953 goto err_free_cpumask;
954
955 return policy;
956
957err_free_cpumask:
958 free_cpumask_var(policy->cpus);
959err_free_policy:
960 kfree(policy);
961
962 return NULL;
963}
964
965static void cpufreq_policy_free(struct cpufreq_policy *policy)
966{
967 free_cpumask_var(policy->related_cpus);
968 free_cpumask_var(policy->cpus);
969 kfree(policy);
970}
971
a82fab29
SB
972static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
973 bool frozen)
1da177e4 974{
fcf80582 975 unsigned int j, cpu = dev->id;
65922465 976 int ret = -ENOMEM;
1da177e4 977 struct cpufreq_policy *policy;
1da177e4 978 unsigned long flags;
90e41bac 979#ifdef CONFIG_HOTPLUG_CPU
fcf80582 980 struct cpufreq_governor *gov;
90e41bac
PB
981 int sibling;
982#endif
1da177e4 983
c32b6b8e
AR
984 if (cpu_is_offline(cpu))
985 return 0;
986
2d06d8c4 987 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
988
989#ifdef CONFIG_SMP
990 /* check whether a different CPU already registered this
991 * CPU because it is in the same boat. */
992 policy = cpufreq_cpu_get(cpu);
993 if (unlikely(policy)) {
8ff69732 994 cpufreq_cpu_put(policy);
1da177e4
LT
995 return 0;
996 }
fcf80582
VK
997
998#ifdef CONFIG_HOTPLUG_CPU
999 /* Check if this cpu was hot-unplugged earlier and has siblings */
0d1857a1 1000 read_lock_irqsave(&cpufreq_driver_lock, flags);
fcf80582
VK
1001 for_each_online_cpu(sibling) {
1002 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
2eaa3e2d 1003 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
0d1857a1 1004 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
d8d3b471 1005 return cpufreq_add_policy_cpu(cp, cpu, dev, frozen);
2eaa3e2d 1006 }
fcf80582 1007 }
0d1857a1 1008 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 1009#endif
1da177e4
LT
1010#endif
1011
1c3d85dd 1012 if (!try_module_get(cpufreq_driver->owner)) {
1da177e4
LT
1013 ret = -EINVAL;
1014 goto module_out;
1015 }
1016
8414809c
SB
1017 if (frozen)
1018 /* Restore the saved policy when doing light-weight init */
1019 policy = cpufreq_policy_restore(cpu);
1020 else
1021 policy = cpufreq_policy_alloc();
1022
059019a3 1023 if (!policy)
1da177e4 1024 goto nomem_out;
059019a3 1025
1da177e4 1026 policy->cpu = cpu;
65922465 1027 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
835481d9 1028 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 1029
5a01f2e8 1030 /* Initially set CPU itself as the policy_cpu */
f1625066 1031 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
5a01f2e8 1032
1da177e4 1033 init_completion(&policy->kobj_unregister);
65f27f38 1034 INIT_WORK(&policy->update, handle_update);
1da177e4
LT
1035
1036 /* call driver. From then on the cpufreq must be able
1037 * to accept all calls to ->verify and ->setpolicy for this CPU
1038 */
1c3d85dd 1039 ret = cpufreq_driver->init(policy);
1da177e4 1040 if (ret) {
2d06d8c4 1041 pr_debug("initialization failed\n");
2eaa3e2d 1042 goto err_set_policy_cpu;
1da177e4 1043 }
643ae6e8 1044
fcf80582
VK
1045 /* related cpus should atleast have policy->cpus */
1046 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1047
643ae6e8
VK
1048 /*
1049 * affected cpus must always be the one, which are online. We aren't
1050 * managing offline cpus here.
1051 */
1052 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1053
187d9f4e
MC
1054 policy->user_policy.min = policy->min;
1055 policy->user_policy.max = policy->max;
1da177e4 1056
a1531acd
TR
1057 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1058 CPUFREQ_START, policy);
1059
fcf80582
VK
1060#ifdef CONFIG_HOTPLUG_CPU
1061 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1062 if (gov) {
1063 policy->governor = gov;
1064 pr_debug("Restoring governor %s for cpu %d\n",
1065 policy->governor->name, cpu);
4bfa042c 1066 }
fcf80582 1067#endif
1da177e4 1068
e18f1682
SB
1069 write_lock_irqsave(&cpufreq_driver_lock, flags);
1070 for_each_cpu(j, policy->cpus) {
1071 per_cpu(cpufreq_cpu_data, j) = policy;
1072 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
1073 }
1074 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1075
a82fab29 1076 if (!frozen) {
308b60e7 1077 ret = cpufreq_add_dev_interface(policy, dev);
a82fab29
SB
1078 if (ret)
1079 goto err_out_unregister;
1080 }
8ff69732 1081
e18f1682
SB
1082 cpufreq_init_policy(policy);
1083
038c5b3e 1084 kobject_uevent(&policy->kobj, KOBJ_ADD);
1c3d85dd 1085 module_put(cpufreq_driver->owner);
2d06d8c4 1086 pr_debug("initialization complete\n");
87c32271 1087
1da177e4
LT
1088 return 0;
1089
1da177e4 1090err_out_unregister:
0d1857a1 1091 write_lock_irqsave(&cpufreq_driver_lock, flags);
e18f1682 1092 for_each_cpu(j, policy->cpus) {
7a6aedfa 1093 per_cpu(cpufreq_cpu_data, j) = NULL;
e18f1682
SB
1094 if (j != cpu)
1095 per_cpu(cpufreq_policy_cpu, j) = -1;
1096 }
0d1857a1 1097 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1098
2eaa3e2d
VK
1099err_set_policy_cpu:
1100 per_cpu(cpufreq_policy_cpu, cpu) = -1;
e9698cc5 1101 cpufreq_policy_free(policy);
1da177e4 1102nomem_out:
1c3d85dd 1103 module_put(cpufreq_driver->owner);
c32b6b8e 1104module_out:
1da177e4
LT
1105 return ret;
1106}
1107
a82fab29
SB
1108/**
1109 * cpufreq_add_dev - add a CPU device
1110 *
1111 * Adds the cpufreq interface for a CPU device.
1112 *
1113 * The Oracle says: try running cpufreq registration/unregistration concurrently
1114 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1115 * mess up, but more thorough testing is needed. - Mathieu
1116 */
1117static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1118{
1119 return __cpufreq_add_dev(dev, sif, false);
1120}
1121
b8eed8af
VK
1122static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1123{
1124 int j;
1125
1126 policy->last_cpu = policy->cpu;
1127 policy->cpu = cpu;
1128
3361b7b1 1129 for_each_cpu(j, policy->cpus)
b8eed8af 1130 per_cpu(cpufreq_policy_cpu, j) = cpu;
b8eed8af
VK
1131
1132#ifdef CONFIG_CPU_FREQ_TABLE
1133 cpufreq_frequency_table_update_policy_cpu(policy);
1134#endif
1135 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1136 CPUFREQ_UPDATE_POLICY_CPU, policy);
1137}
1da177e4 1138
3a3e9e06 1139static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
a82fab29 1140 unsigned int old_cpu, bool frozen)
f9ba680d
SB
1141{
1142 struct device *cpu_dev;
1143 unsigned long flags;
1144 int ret;
1145
1146 /* first sibling now owns the new sysfs dir */
3a3e9e06 1147 cpu_dev = get_cpu_device(cpumask_first(policy->cpus));
a82fab29
SB
1148
1149 /* Don't touch sysfs files during light-weight tear-down */
1150 if (frozen)
1151 return cpu_dev->id;
1152
f9ba680d 1153 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
3a3e9e06 1154 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
f9ba680d
SB
1155 if (ret) {
1156 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1157
1158 WARN_ON(lock_policy_rwsem_write(old_cpu));
3a3e9e06 1159 cpumask_set_cpu(old_cpu, policy->cpus);
f9ba680d
SB
1160
1161 write_lock_irqsave(&cpufreq_driver_lock, flags);
3a3e9e06 1162 per_cpu(cpufreq_cpu_data, old_cpu) = policy;
f9ba680d
SB
1163 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1164
1165 unlock_policy_rwsem_write(old_cpu);
1166
3a3e9e06 1167 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
f9ba680d
SB
1168 "cpufreq");
1169
1170 return -EINVAL;
1171 }
1172
1173 return cpu_dev->id;
1174}
1175
1da177e4 1176/**
5a01f2e8 1177 * __cpufreq_remove_dev - remove a CPU device
1da177e4
LT
1178 *
1179 * Removes the cpufreq interface for a CPU device.
5a01f2e8
VP
1180 * Caller should already have policy_rwsem in write mode for this CPU.
1181 * This routine frees the rwsem before returning.
1da177e4 1182 */
bb176f7d 1183static int __cpufreq_remove_dev(struct device *dev,
a82fab29 1184 struct subsys_interface *sif, bool frozen)
1da177e4 1185{
f9ba680d
SB
1186 unsigned int cpu = dev->id, cpus;
1187 int new_cpu;
1da177e4 1188 unsigned long flags;
3a3e9e06 1189 struct cpufreq_policy *policy;
499bca9b
AW
1190 struct kobject *kobj;
1191 struct completion *cmp;
1da177e4 1192
b8eed8af 1193 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1da177e4 1194
0d1857a1 1195 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 1196
3a3e9e06 1197 policy = per_cpu(cpufreq_cpu_data, cpu);
2eaa3e2d
VK
1198 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1199
8414809c
SB
1200 /* Save the policy somewhere when doing a light-weight tear-down */
1201 if (frozen)
3a3e9e06 1202 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
8414809c 1203
0d1857a1 1204 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1205
3a3e9e06 1206 if (!policy) {
b8eed8af 1207 pr_debug("%s: No cpu_data found\n", __func__);
1da177e4
LT
1208 return -EINVAL;
1209 }
1da177e4 1210
1c3d85dd 1211 if (cpufreq_driver->target)
3a3e9e06 1212 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1da177e4 1213
084f3493 1214#ifdef CONFIG_HOTPLUG_CPU
1c3d85dd 1215 if (!cpufreq_driver->setpolicy)
fa69e33f 1216 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
3a3e9e06 1217 policy->governor->name, CPUFREQ_NAME_LEN);
1da177e4
LT
1218#endif
1219
2eaa3e2d 1220 WARN_ON(lock_policy_rwsem_write(cpu));
3a3e9e06 1221 cpus = cpumask_weight(policy->cpus);
e4969eba
VK
1222
1223 if (cpus > 1)
3a3e9e06 1224 cpumask_clear_cpu(cpu, policy->cpus);
2eaa3e2d 1225 unlock_policy_rwsem_write(cpu);
084f3493 1226
3a3e9e06 1227 if (cpu != policy->cpu && !frozen) {
73bf0fc2
VK
1228 sysfs_remove_link(&dev->kobj, "cpufreq");
1229 } else if (cpus > 1) {
084f3493 1230
3a3e9e06 1231 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
f9ba680d 1232 if (new_cpu >= 0) {
2eaa3e2d 1233 WARN_ON(lock_policy_rwsem_write(cpu));
3a3e9e06 1234 update_policy_cpu(policy, new_cpu);
499bca9b 1235 unlock_policy_rwsem_write(cpu);
a82fab29
SB
1236
1237 if (!frozen) {
1238 pr_debug("%s: policy Kobject moved to cpu: %d "
1239 "from: %d\n",__func__, new_cpu, cpu);
1240 }
1da177e4
LT
1241 }
1242 }
1da177e4 1243
b8eed8af
VK
1244 /* If cpu is last user of policy, free policy */
1245 if (cpus == 1) {
2a998599 1246 if (cpufreq_driver->target)
3a3e9e06 1247 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2a998599 1248
8414809c
SB
1249 if (!frozen) {
1250 lock_policy_rwsem_read(cpu);
3a3e9e06
VK
1251 kobj = &policy->kobj;
1252 cmp = &policy->kobj_unregister;
8414809c
SB
1253 unlock_policy_rwsem_read(cpu);
1254 kobject_put(kobj);
1255
1256 /*
1257 * We need to make sure that the underlying kobj is
1258 * actually not referenced anymore by anybody before we
1259 * proceed with unloading.
1260 */
1261 pr_debug("waiting for dropping of refcount\n");
1262 wait_for_completion(cmp);
1263 pr_debug("wait complete\n");
1264 }
7d26e2d5 1265
8414809c
SB
1266 /*
1267 * Perform the ->exit() even during light-weight tear-down,
1268 * since this is a core component, and is essential for the
1269 * subsequent light-weight ->init() to succeed.
b8eed8af 1270 */
1c3d85dd 1271 if (cpufreq_driver->exit)
3a3e9e06 1272 cpufreq_driver->exit(policy);
27ecddc2 1273
8414809c 1274 if (!frozen)
3a3e9e06 1275 cpufreq_policy_free(policy);
2a998599 1276 } else {
2a998599 1277 if (cpufreq_driver->target) {
3a3e9e06
VK
1278 __cpufreq_governor(policy, CPUFREQ_GOV_START);
1279 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2a998599 1280 }
27ecddc2 1281 }
1da177e4 1282
2eaa3e2d 1283 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1da177e4
LT
1284 return 0;
1285}
1286
8a25a2fd 1287static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1288{
8a25a2fd 1289 unsigned int cpu = dev->id;
5a01f2e8 1290 int retval;
ec28297a
VP
1291
1292 if (cpu_is_offline(cpu))
1293 return 0;
1294
a82fab29 1295 retval = __cpufreq_remove_dev(dev, sif, false);
5a01f2e8
VP
1296 return retval;
1297}
1298
65f27f38 1299static void handle_update(struct work_struct *work)
1da177e4 1300{
65f27f38
DH
1301 struct cpufreq_policy *policy =
1302 container_of(work, struct cpufreq_policy, update);
1303 unsigned int cpu = policy->cpu;
2d06d8c4 1304 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1305 cpufreq_update_policy(cpu);
1306}
1307
1308/**
bb176f7d
VK
1309 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1310 * in deep trouble.
1da177e4
LT
1311 * @cpu: cpu number
1312 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1313 * @new_freq: CPU frequency the CPU actually runs at
1314 *
29464f28
DJ
1315 * We adjust to current frequency first, and need to clean up later.
1316 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1317 */
e08f5f5b
GS
1318static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1319 unsigned int new_freq)
1da177e4 1320{
b43a7ffb 1321 struct cpufreq_policy *policy;
1da177e4 1322 struct cpufreq_freqs freqs;
b43a7ffb
VK
1323 unsigned long flags;
1324
2d06d8c4 1325 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1326 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1327
1da177e4
LT
1328 freqs.old = old_freq;
1329 freqs.new = new_freq;
b43a7ffb
VK
1330
1331 read_lock_irqsave(&cpufreq_driver_lock, flags);
1332 policy = per_cpu(cpufreq_cpu_data, cpu);
1333 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1334
1335 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1336 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1da177e4
LT
1337}
1338
32ee8c3e 1339/**
4ab70df4 1340 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1341 * @cpu: CPU number
1342 *
1343 * This is the last known freq, without actually getting it from the driver.
1344 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1345 */
1346unsigned int cpufreq_quick_get(unsigned int cpu)
1347{
9e21ba8b 1348 struct cpufreq_policy *policy;
e08f5f5b 1349 unsigned int ret_freq = 0;
95235ca2 1350
1c3d85dd
RW
1351 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1352 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1353
1354 policy = cpufreq_cpu_get(cpu);
95235ca2 1355 if (policy) {
e08f5f5b 1356 ret_freq = policy->cur;
95235ca2
VP
1357 cpufreq_cpu_put(policy);
1358 }
1359
4d34a67d 1360 return ret_freq;
95235ca2
VP
1361}
1362EXPORT_SYMBOL(cpufreq_quick_get);
1363
3d737108
JB
1364/**
1365 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1366 * @cpu: CPU number
1367 *
1368 * Just return the max possible frequency for a given CPU.
1369 */
1370unsigned int cpufreq_quick_get_max(unsigned int cpu)
1371{
1372 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1373 unsigned int ret_freq = 0;
1374
1375 if (policy) {
1376 ret_freq = policy->max;
1377 cpufreq_cpu_put(policy);
1378 }
1379
1380 return ret_freq;
1381}
1382EXPORT_SYMBOL(cpufreq_quick_get_max);
1383
5a01f2e8 1384static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1385{
7a6aedfa 1386 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1387 unsigned int ret_freq = 0;
5800043b 1388
1c3d85dd 1389 if (!cpufreq_driver->get)
4d34a67d 1390 return ret_freq;
1da177e4 1391
1c3d85dd 1392 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1393
e08f5f5b 1394 if (ret_freq && policy->cur &&
1c3d85dd 1395 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e08f5f5b
GS
1396 /* verify no discrepancy between actual and
1397 saved value exists */
1398 if (unlikely(ret_freq != policy->cur)) {
1399 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1400 schedule_work(&policy->update);
1401 }
1402 }
1403
4d34a67d 1404 return ret_freq;
5a01f2e8 1405}
1da177e4 1406
5a01f2e8
VP
1407/**
1408 * cpufreq_get - get the current CPU frequency (in kHz)
1409 * @cpu: CPU number
1410 *
1411 * Get the CPU current (static) CPU frequency
1412 */
1413unsigned int cpufreq_get(unsigned int cpu)
1414{
1415 unsigned int ret_freq = 0;
1416 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1417
1418 if (!policy)
1419 goto out;
1420
1421 if (unlikely(lock_policy_rwsem_read(cpu)))
1422 goto out_policy;
1423
1424 ret_freq = __cpufreq_get(cpu);
1425
1426 unlock_policy_rwsem_read(cpu);
1da177e4 1427
5a01f2e8
VP
1428out_policy:
1429 cpufreq_cpu_put(policy);
1430out:
4d34a67d 1431 return ret_freq;
1da177e4
LT
1432}
1433EXPORT_SYMBOL(cpufreq_get);
1434
8a25a2fd
KS
1435static struct subsys_interface cpufreq_interface = {
1436 .name = "cpufreq",
1437 .subsys = &cpu_subsys,
1438 .add_dev = cpufreq_add_dev,
1439 .remove_dev = cpufreq_remove_dev,
e00e56df
RW
1440};
1441
42d4dc3f 1442/**
e00e56df
RW
1443 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1444 *
1445 * This function is only executed for the boot processor. The other CPUs
1446 * have been put offline by means of CPU hotplug.
42d4dc3f 1447 */
e00e56df 1448static int cpufreq_bp_suspend(void)
42d4dc3f 1449{
e08f5f5b 1450 int ret = 0;
4bc5d341 1451
e00e56df 1452 int cpu = smp_processor_id();
3a3e9e06 1453 struct cpufreq_policy *policy;
42d4dc3f 1454
2d06d8c4 1455 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1456
e00e56df 1457 /* If there's no policy for the boot CPU, we have nothing to do. */
3a3e9e06
VK
1458 policy = cpufreq_cpu_get(cpu);
1459 if (!policy)
e00e56df 1460 return 0;
42d4dc3f 1461
1c3d85dd 1462 if (cpufreq_driver->suspend) {
3a3e9e06 1463 ret = cpufreq_driver->suspend(policy);
ce6c3997 1464 if (ret)
42d4dc3f 1465 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
3a3e9e06 1466 "step on CPU %u\n", policy->cpu);
42d4dc3f
BH
1467 }
1468
3a3e9e06 1469 cpufreq_cpu_put(policy);
c9060494 1470 return ret;
42d4dc3f
BH
1471}
1472
1da177e4 1473/**
e00e56df 1474 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1475 *
1476 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1477 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1478 * restored. It will verify that the current freq is in sync with
1479 * what we believe it to be. This is a bit later than when it
1480 * should be, but nonethteless it's better than calling
1481 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1482 *
1483 * This function is only executed for the boot CPU. The other CPUs have not
1484 * been turned on yet.
1da177e4 1485 */
e00e56df 1486static void cpufreq_bp_resume(void)
1da177e4 1487{
e08f5f5b 1488 int ret = 0;
4bc5d341 1489
e00e56df 1490 int cpu = smp_processor_id();
3a3e9e06 1491 struct cpufreq_policy *policy;
1da177e4 1492
2d06d8c4 1493 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1494
e00e56df 1495 /* If there's no policy for the boot CPU, we have nothing to do. */
3a3e9e06
VK
1496 policy = cpufreq_cpu_get(cpu);
1497 if (!policy)
e00e56df 1498 return;
1da177e4 1499
1c3d85dd 1500 if (cpufreq_driver->resume) {
3a3e9e06 1501 ret = cpufreq_driver->resume(policy);
1da177e4
LT
1502 if (ret) {
1503 printk(KERN_ERR "cpufreq: resume failed in ->resume "
3a3e9e06 1504 "step on CPU %u\n", policy->cpu);
c9060494 1505 goto fail;
1da177e4
LT
1506 }
1507 }
1508
3a3e9e06 1509 schedule_work(&policy->update);
ce6c3997 1510
c9060494 1511fail:
3a3e9e06 1512 cpufreq_cpu_put(policy);
1da177e4
LT
1513}
1514
e00e56df
RW
1515static struct syscore_ops cpufreq_syscore_ops = {
1516 .suspend = cpufreq_bp_suspend,
1517 .resume = cpufreq_bp_resume,
1da177e4
LT
1518};
1519
9d95046e
BP
1520/**
1521 * cpufreq_get_current_driver - return current driver's name
1522 *
1523 * Return the name string of the currently loaded cpufreq driver
1524 * or NULL, if none.
1525 */
1526const char *cpufreq_get_current_driver(void)
1527{
1c3d85dd
RW
1528 if (cpufreq_driver)
1529 return cpufreq_driver->name;
1530
1531 return NULL;
9d95046e
BP
1532}
1533EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1534
1535/*********************************************************************
1536 * NOTIFIER LISTS INTERFACE *
1537 *********************************************************************/
1538
1539/**
1540 * cpufreq_register_notifier - register a driver with cpufreq
1541 * @nb: notifier function to register
1542 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1543 *
32ee8c3e 1544 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1545 * are notified about clock rate changes (once before and once after
1546 * the transition), or a list of drivers that are notified about
1547 * changes in cpufreq policy.
1548 *
1549 * This function may sleep, and has the same return conditions as
e041c683 1550 * blocking_notifier_chain_register.
1da177e4
LT
1551 */
1552int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1553{
1554 int ret;
1555
d5aaffa9
DB
1556 if (cpufreq_disabled())
1557 return -EINVAL;
1558
74212ca4
CEB
1559 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1560
1da177e4
LT
1561 switch (list) {
1562 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1563 ret = srcu_notifier_chain_register(
e041c683 1564 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1565 break;
1566 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1567 ret = blocking_notifier_chain_register(
1568 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1569 break;
1570 default:
1571 ret = -EINVAL;
1572 }
1da177e4
LT
1573
1574 return ret;
1575}
1576EXPORT_SYMBOL(cpufreq_register_notifier);
1577
1da177e4
LT
1578/**
1579 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1580 * @nb: notifier block to be unregistered
bb176f7d 1581 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1582 *
1583 * Remove a driver from the CPU frequency notifier list.
1584 *
1585 * This function may sleep, and has the same return conditions as
e041c683 1586 * blocking_notifier_chain_unregister.
1da177e4
LT
1587 */
1588int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1589{
1590 int ret;
1591
d5aaffa9
DB
1592 if (cpufreq_disabled())
1593 return -EINVAL;
1594
1da177e4
LT
1595 switch (list) {
1596 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1597 ret = srcu_notifier_chain_unregister(
e041c683 1598 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1599 break;
1600 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1601 ret = blocking_notifier_chain_unregister(
1602 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1603 break;
1604 default:
1605 ret = -EINVAL;
1606 }
1da177e4
LT
1607
1608 return ret;
1609}
1610EXPORT_SYMBOL(cpufreq_unregister_notifier);
1611
1612
1613/*********************************************************************
1614 * GOVERNORS *
1615 *********************************************************************/
1616
1da177e4
LT
1617int __cpufreq_driver_target(struct cpufreq_policy *policy,
1618 unsigned int target_freq,
1619 unsigned int relation)
1620{
1621 int retval = -EINVAL;
7249924e 1622 unsigned int old_target_freq = target_freq;
c32b6b8e 1623
a7b422cd
KRW
1624 if (cpufreq_disabled())
1625 return -ENODEV;
7c30ed53
VK
1626 if (policy->transition_ongoing)
1627 return -EBUSY;
a7b422cd 1628
7249924e
VK
1629 /* Make sure that target_freq is within supported range */
1630 if (target_freq > policy->max)
1631 target_freq = policy->max;
1632 if (target_freq < policy->min)
1633 target_freq = policy->min;
1634
1635 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1636 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1637
1638 if (target_freq == policy->cur)
1639 return 0;
1640
1c3d85dd
RW
1641 if (cpufreq_driver->target)
1642 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1643
1da177e4
LT
1644 return retval;
1645}
1646EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1647
1da177e4
LT
1648int cpufreq_driver_target(struct cpufreq_policy *policy,
1649 unsigned int target_freq,
1650 unsigned int relation)
1651{
f1829e4a 1652 int ret = -EINVAL;
1da177e4 1653
5a01f2e8 1654 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1655 goto fail;
1da177e4
LT
1656
1657 ret = __cpufreq_driver_target(policy, target_freq, relation);
1658
5a01f2e8 1659 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1660
f1829e4a 1661fail:
1da177e4
LT
1662 return ret;
1663}
1664EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1665
153d7f3f 1666/*
153d7f3f
AV
1667 * when "event" is CPUFREQ_GOV_LIMITS
1668 */
1da177e4 1669
e08f5f5b
GS
1670static int __cpufreq_governor(struct cpufreq_policy *policy,
1671 unsigned int event)
1da177e4 1672{
cc993cab 1673 int ret;
6afde10c
TR
1674
1675 /* Only must be defined when default governor is known to have latency
1676 restrictions, like e.g. conservative or ondemand.
1677 That this is the case is already ensured in Kconfig
1678 */
1679#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1680 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1681#else
1682 struct cpufreq_governor *gov = NULL;
1683#endif
1c256245
TR
1684
1685 if (policy->governor->max_transition_latency &&
1686 policy->cpuinfo.transition_latency >
1687 policy->governor->max_transition_latency) {
6afde10c
TR
1688 if (!gov)
1689 return -EINVAL;
1690 else {
1691 printk(KERN_WARNING "%s governor failed, too long"
1692 " transition latency of HW, fallback"
1693 " to %s governor\n",
1694 policy->governor->name,
1695 gov->name);
1696 policy->governor = gov;
1697 }
1c256245 1698 }
1da177e4
LT
1699
1700 if (!try_module_get(policy->governor->owner))
1701 return -EINVAL;
1702
2d06d8c4 1703 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e08f5f5b 1704 policy->cpu, event);
95731ebb
XC
1705
1706 mutex_lock(&cpufreq_governor_lock);
1707 if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1708 (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1709 mutex_unlock(&cpufreq_governor_lock);
1710 return -EBUSY;
1711 }
1712
1713 if (event == CPUFREQ_GOV_STOP)
1714 policy->governor_enabled = false;
1715 else if (event == CPUFREQ_GOV_START)
1716 policy->governor_enabled = true;
1717
1718 mutex_unlock(&cpufreq_governor_lock);
1719
1da177e4
LT
1720 ret = policy->governor->governor(policy, event);
1721
4d5dcc42
VK
1722 if (!ret) {
1723 if (event == CPUFREQ_GOV_POLICY_INIT)
1724 policy->governor->initialized++;
1725 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1726 policy->governor->initialized--;
95731ebb
XC
1727 } else {
1728 /* Restore original values */
1729 mutex_lock(&cpufreq_governor_lock);
1730 if (event == CPUFREQ_GOV_STOP)
1731 policy->governor_enabled = true;
1732 else if (event == CPUFREQ_GOV_START)
1733 policy->governor_enabled = false;
1734 mutex_unlock(&cpufreq_governor_lock);
4d5dcc42 1735 }
b394058f 1736
e08f5f5b
GS
1737 /* we keep one module reference alive for
1738 each CPU governed by this CPU */
1da177e4
LT
1739 if ((event != CPUFREQ_GOV_START) || ret)
1740 module_put(policy->governor->owner);
1741 if ((event == CPUFREQ_GOV_STOP) && !ret)
1742 module_put(policy->governor->owner);
1743
1744 return ret;
1745}
1746
1da177e4
LT
1747int cpufreq_register_governor(struct cpufreq_governor *governor)
1748{
3bcb09a3 1749 int err;
1da177e4
LT
1750
1751 if (!governor)
1752 return -EINVAL;
1753
a7b422cd
KRW
1754 if (cpufreq_disabled())
1755 return -ENODEV;
1756
3fc54d37 1757 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1758
b394058f 1759 governor->initialized = 0;
3bcb09a3
JF
1760 err = -EBUSY;
1761 if (__find_governor(governor->name) == NULL) {
1762 err = 0;
1763 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1764 }
1da177e4 1765
32ee8c3e 1766 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1767 return err;
1da177e4
LT
1768}
1769EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1770
1da177e4
LT
1771void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1772{
90e41bac
PB
1773#ifdef CONFIG_HOTPLUG_CPU
1774 int cpu;
1775#endif
1776
1da177e4
LT
1777 if (!governor)
1778 return;
1779
a7b422cd
KRW
1780 if (cpufreq_disabled())
1781 return;
1782
90e41bac
PB
1783#ifdef CONFIG_HOTPLUG_CPU
1784 for_each_present_cpu(cpu) {
1785 if (cpu_online(cpu))
1786 continue;
1787 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1788 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1789 }
1790#endif
1791
3fc54d37 1792 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1793 list_del(&governor->governor_list);
3fc54d37 1794 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1795 return;
1796}
1797EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1798
1799
1da177e4
LT
1800/*********************************************************************
1801 * POLICY INTERFACE *
1802 *********************************************************************/
1803
1804/**
1805 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1806 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1807 * is written
1da177e4
LT
1808 *
1809 * Reads the current cpufreq policy.
1810 */
1811int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1812{
1813 struct cpufreq_policy *cpu_policy;
1814 if (!policy)
1815 return -EINVAL;
1816
1817 cpu_policy = cpufreq_cpu_get(cpu);
1818 if (!cpu_policy)
1819 return -EINVAL;
1820
1da177e4 1821 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1822
1823 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1824 return 0;
1825}
1826EXPORT_SYMBOL(cpufreq_get_policy);
1827
153d7f3f 1828/*
e08f5f5b
GS
1829 * data : current policy.
1830 * policy : policy to be set.
153d7f3f 1831 */
3a3e9e06
VK
1832static int __cpufreq_set_policy(struct cpufreq_policy *policy,
1833 struct cpufreq_policy *new_policy)
1da177e4 1834{
7bd353a9 1835 int ret = 0, failed = 1;
1da177e4 1836
3a3e9e06
VK
1837 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
1838 new_policy->min, new_policy->max);
1da177e4 1839
3a3e9e06 1840 memcpy(&new_policy->cpuinfo, &policy->cpuinfo,
e08f5f5b 1841 sizeof(struct cpufreq_cpuinfo));
1da177e4 1842
3a3e9e06 1843 if (new_policy->min > policy->max || new_policy->max < policy->min) {
9c9a43ed
MD
1844 ret = -EINVAL;
1845 goto error_out;
1846 }
1847
1da177e4 1848 /* verify the cpu speed can be set within this limit */
3a3e9e06 1849 ret = cpufreq_driver->verify(new_policy);
1da177e4
LT
1850 if (ret)
1851 goto error_out;
1852
1da177e4 1853 /* adjust if necessary - all reasons */
e041c683 1854 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 1855 CPUFREQ_ADJUST, new_policy);
1da177e4
LT
1856
1857 /* adjust if necessary - hardware incompatibility*/
e041c683 1858 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 1859 CPUFREQ_INCOMPATIBLE, new_policy);
1da177e4 1860
bb176f7d
VK
1861 /*
1862 * verify the cpu speed can be set within this limit, which might be
1863 * different to the first one
1864 */
3a3e9e06 1865 ret = cpufreq_driver->verify(new_policy);
e041c683 1866 if (ret)
1da177e4 1867 goto error_out;
1da177e4
LT
1868
1869 /* notification of the new policy */
e041c683 1870 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 1871 CPUFREQ_NOTIFY, new_policy);
1da177e4 1872
3a3e9e06
VK
1873 policy->min = new_policy->min;
1874 policy->max = new_policy->max;
1da177e4 1875
2d06d8c4 1876 pr_debug("new min and max freqs are %u - %u kHz\n",
3a3e9e06 1877 policy->min, policy->max);
1da177e4 1878
1c3d85dd 1879 if (cpufreq_driver->setpolicy) {
3a3e9e06 1880 policy->policy = new_policy->policy;
2d06d8c4 1881 pr_debug("setting range\n");
3a3e9e06 1882 ret = cpufreq_driver->setpolicy(new_policy);
1da177e4 1883 } else {
3a3e9e06 1884 if (new_policy->governor != policy->governor) {
1da177e4 1885 /* save old, working values */
3a3e9e06 1886 struct cpufreq_governor *old_gov = policy->governor;
1da177e4 1887
2d06d8c4 1888 pr_debug("governor switch\n");
1da177e4
LT
1889
1890 /* end old governor */
3a3e9e06
VK
1891 if (policy->governor) {
1892 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1893 unlock_policy_rwsem_write(new_policy->cpu);
1894 __cpufreq_governor(policy,
7bd353a9 1895 CPUFREQ_GOV_POLICY_EXIT);
3a3e9e06 1896 lock_policy_rwsem_write(new_policy->cpu);
7bd353a9 1897 }
1da177e4
LT
1898
1899 /* start new governor */
3a3e9e06
VK
1900 policy->governor = new_policy->governor;
1901 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
1902 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
7bd353a9 1903 failed = 0;
955ef483 1904 } else {
3a3e9e06
VK
1905 unlock_policy_rwsem_write(new_policy->cpu);
1906 __cpufreq_governor(policy,
7bd353a9 1907 CPUFREQ_GOV_POLICY_EXIT);
3a3e9e06 1908 lock_policy_rwsem_write(new_policy->cpu);
955ef483 1909 }
7bd353a9
VK
1910 }
1911
1912 if (failed) {
1da177e4 1913 /* new governor failed, so re-start old one */
2d06d8c4 1914 pr_debug("starting governor %s failed\n",
3a3e9e06 1915 policy->governor->name);
1da177e4 1916 if (old_gov) {
3a3e9e06
VK
1917 policy->governor = old_gov;
1918 __cpufreq_governor(policy,
7bd353a9 1919 CPUFREQ_GOV_POLICY_INIT);
3a3e9e06 1920 __cpufreq_governor(policy,
e08f5f5b 1921 CPUFREQ_GOV_START);
1da177e4
LT
1922 }
1923 ret = -EINVAL;
1924 goto error_out;
1925 }
1926 /* might be a policy change, too, so fall through */
1927 }
2d06d8c4 1928 pr_debug("governor: change or update limits\n");
3a3e9e06 1929 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1da177e4
LT
1930 }
1931
7d5e350f 1932error_out:
1da177e4
LT
1933 return ret;
1934}
1935
1da177e4
LT
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 *
 * Rebuilds the policy from the saved user_policy limits and, if the
 * driver exposes ->get(), re-reads the current frequency so governors
 * can be told about changes made behind the kernel's back (e.g. by
 * firmware/BIOS).
 *
 * Returns 0 on success, -ENODEV if no policy exists for @cpu, or
 * -EINVAL if the policy rwsem cannot be taken for writing.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	/* Takes a reference on the policy; dropped at the "fail" label. */
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	/* Start from the live policy, then restore the user-requested limits */
	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;
	new_policy.policy = policy->user_policy.policy;
	new_policy.governor = policy->user_policy.governor;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq");
			policy->cur = new_policy.cur;
		} else {
			/* hardware freq drifted: resync core bookkeeping */
			if (policy->cur != new_policy.cur && cpufreq_driver->target)
				cpufreq_out_of_sync(cpu, policy->cur,
								new_policy.cur);
		}
	}

	ret = __cpufreq_set_policy(policy, &new_policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
1992
2760984f 1993static int cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1994 unsigned long action, void *hcpu)
1995{
1996 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1997 struct device *dev;
5302c3fb 1998 bool frozen = false;
c32b6b8e 1999
8a25a2fd
KS
2000 dev = get_cpu_device(cpu);
2001 if (dev) {
5302c3fb
SB
2002
2003 if (action & CPU_TASKS_FROZEN)
2004 frozen = true;
2005
2006 switch (action & ~CPU_TASKS_FROZEN) {
c32b6b8e 2007 case CPU_ONLINE:
5302c3fb 2008 __cpufreq_add_dev(dev, NULL, frozen);
23d32899 2009 cpufreq_update_policy(cpu);
c32b6b8e 2010 break;
5302c3fb 2011
c32b6b8e 2012 case CPU_DOWN_PREPARE:
5302c3fb 2013 __cpufreq_remove_dev(dev, NULL, frozen);
c32b6b8e 2014 break;
5302c3fb 2015
5a01f2e8 2016 case CPU_DOWN_FAILED:
5302c3fb 2017 __cpufreq_add_dev(dev, NULL, frozen);
c32b6b8e
AR
2018 break;
2019 }
2020 }
2021 return NOTIFY_OK;
2022}
2023
/* Hotplug notifier wiring cpufreq_cpu_callback into the CPU notifier chain */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
2027
2028/*********************************************************************
2029 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2030 *********************************************************************/
2031
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime), -ENODEV when cpufreq is
 * disabled, and -EINVAL on a malformed @driver_data (a driver must
 * provide ->verify, ->init, and one of ->setpolicy / ->target).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Minimal sanity check of the driver's mandatory callbacks */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* Install the driver pointer; only one driver may be active */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* Triggers ->init() for every present CPU via the subsys interface */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	/* Roll back: clear the driver pointer so another driver may register */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2103
1da177e4
LT
2104/**
2105 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2106 *
bb176f7d 2107 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
2108 * the right to do so, i.e. if you have succeeded in initialising before!
2109 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2110 * currently not initialised.
2111 */
221dee28 2112int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
2113{
2114 unsigned long flags;
2115
1c3d85dd 2116 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 2117 return -EINVAL;
1da177e4 2118
2d06d8c4 2119 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 2120
8a25a2fd 2121 subsys_interface_unregister(&cpufreq_interface);
65edc68c 2122 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 2123
0d1857a1 2124 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2125 cpufreq_driver = NULL;
0d1857a1 2126 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
2127
2128 return 0;
2129}
2130EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2131
/*
 * Core bring-up: initialise per-CPU policy bookkeeping, create the global
 * cpufreq kobject, and hook into syscore ops.  Runs at core_initcall time,
 * i.e. before any cpufreq driver can register.
 */
static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	/* -1: no policy owns this CPU yet; rwsem guards later policy access */
	for_each_possible_cpu(cpu) {
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	/* Global kobject is required by the rest of the core — fail hard */
	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);
This page took 0.789089 seconds and 5 git commands to generate.