cpufreq: Fix broken usage of governor->owner's refcount
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
bb176f7d 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
1da177e4 7 *
c32b6b8e 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 9 * Added handling for CPU hotplug
8ff69732
DJ
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 12 *
1da177e4
LT
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
1da177e4
LT
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
5ff0a268 20#include <linux/cpu.h>
1da177e4
LT
21#include <linux/cpufreq.h>
22#include <linux/delay.h>
1da177e4 23#include <linux/device.h>
5ff0a268
VK
24#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
3fc54d37 27#include <linux/mutex.h>
5ff0a268 28#include <linux/slab.h>
e00e56df 29#include <linux/syscore_ops.h>
5ff0a268 30#include <linux/tick.h>
6f4f2723
TR
31#include <trace/events/power.h>
32
1da177e4 33/**
cd878479 34 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
35 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
37 */
1c3d85dd 38static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 39static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
8414809c 40static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
bb176f7d
VK
41static DEFINE_RWLOCK(cpufreq_driver_lock);
42static DEFINE_MUTEX(cpufreq_governor_lock);
c88a1f8b 43static LIST_HEAD(cpufreq_policy_list);
bb176f7d 44
084f3493
TR
45#ifdef CONFIG_HOTPLUG_CPU
46/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 47static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 48#endif
1da177e4 49
5a01f2e8
VP
50/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
5a01f2e8
VP
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 66 */
f1625066 67static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
68static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69
/*
 * Generate the four per-mode helpers lock_policy_rwsem_read(),
 * lock_policy_rwsem_write(), unlock_policy_rwsem_read() and
 * unlock_policy_rwsem_write().  Each maps @cpu to the CPU that owns
 * its policy's semaphore via the cpufreq_policy_cpu per-CPU variable
 * and takes/releases that semaphore in the requested mode.
 */
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	/* -1 means no policy owns this CPU - caller bug. */		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);

#define unlock_policy_rwsem(mode, cpu)					\
static void unlock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
5a01f2e8 93
1da177e4 94/* internal prototypes */
29464f28
DJ
95static int __cpufreq_governor(struct cpufreq_policy *policy,
96 unsigned int event);
5a01f2e8 97static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 98static void handle_update(struct work_struct *work);
1da177e4
LT
99
100/**
32ee8c3e
DJ
101 * Two notifier lists: the "policy" list is involved in the
102 * validation process for a new CPU frequency policy; the
1da177e4
LT
103 * "transition" list for kernel code that needs to handle
104 * changes to devices when the CPU clock speed changes.
105 * The mutex locks both lists.
106 */
e041c683 107static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 108static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 109
74212ca4 110static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
111static int __init init_cpufreq_transition_notifier_list(void)
112{
113 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 114 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
115 return 0;
116}
b3438f82 117pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 118
a7b422cd 119static int off __read_mostly;
da584455 120static int cpufreq_disabled(void)
a7b422cd
KRW
121{
122 return off;
123}
124void disable_cpufreq(void)
125{
126 off = 1;
127}
1da177e4 128static LIST_HEAD(cpufreq_governor_list);
29464f28 129static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 130
4d5dcc42
VK
/*
 * have_governor_per_policy - whether the registered driver wants one
 * governor instance per policy instead of a single system-wide one.
 *
 * NOTE(review): dereferences cpufreq_driver without a NULL check, so
 * this must only run while a driver is registered - confirm callers.
 */
bool have_governor_per_policy(void)
{
	return cpufreq_driver->have_governor_per_policy;
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 136
944e9a03
VK
137struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
138{
139 if (have_governor_per_policy())
140 return &policy->kobj;
141 else
142 return cpufreq_global_kobject;
143}
144EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
145
72a4ce34
VK
/*
 * get_cpu_idle_time_jiffy - derive a CPU's idle time from jiffies.
 * @cpu: CPU number
 * @wall: if non-NULL, receives total wall time in microseconds
 *
 * Idle time is computed as total wall time minus all accounted busy
 * time (user, system, irq, softirq, steal, nice) for @cpu.
 * Returns the idle time in microseconds.
 */
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}
167
168u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
169{
170 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
171
172 if (idle_time == -1ULL)
173 return get_cpu_idle_time_jiffy(cpu, wall);
174 else if (!io_busy)
175 idle_time += get_cpu_iowait_time_us(cpu, wall);
176
177 return idle_time;
178}
179EXPORT_SYMBOL_GPL(get_cpu_idle_time);
180
a9144436 181static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
1da177e4 182{
3a3e9e06 183 struct cpufreq_policy *policy;
1da177e4
LT
184 unsigned long flags;
185
7a6aedfa 186 if (cpu >= nr_cpu_ids)
1da177e4
LT
187 goto err_out;
188
189 /* get the cpufreq driver */
1c3d85dd 190 read_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4 191
1c3d85dd 192 if (!cpufreq_driver)
1da177e4
LT
193 goto err_out_unlock;
194
1c3d85dd 195 if (!try_module_get(cpufreq_driver->owner))
1da177e4
LT
196 goto err_out_unlock;
197
1da177e4 198 /* get the CPU */
3a3e9e06 199 policy = per_cpu(cpufreq_cpu_data, cpu);
1da177e4 200
3a3e9e06 201 if (!policy)
1da177e4
LT
202 goto err_out_put_module;
203
3a3e9e06 204 if (!sysfs && !kobject_get(&policy->kobj))
1da177e4
LT
205 goto err_out_put_module;
206
0d1857a1 207 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
3a3e9e06 208 return policy;
1da177e4 209
7d5e350f 210err_out_put_module:
1c3d85dd 211 module_put(cpufreq_driver->owner);
5800043b 212err_out_unlock:
1c3d85dd 213 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
7d5e350f 214err_out:
1da177e4
LT
215 return NULL;
216}
a9144436
SB
217
/*
 * cpufreq_cpu_get - get the policy of @cpu, taking a reference on the
 * driver module and the policy kobject.  Balanced by cpufreq_cpu_put().
 * Returns NULL if cpufreq is disabled or no policy exists for @cpu.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	if (cpufreq_disabled())
		return NULL;

	return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/* Sysfs-handler variant: takes only the driver-module reference, not a
 * kobject reference.  Balanced by cpufreq_cpu_put_sysfs(). */
static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}
231
3a3e9e06 232static void __cpufreq_cpu_put(struct cpufreq_policy *policy, bool sysfs)
a9144436
SB
233{
234 if (!sysfs)
3a3e9e06 235 kobject_put(&policy->kobj);
1c3d85dd 236 module_put(cpufreq_driver->owner);
a9144436 237}
7d5e350f 238
3a3e9e06 239void cpufreq_cpu_put(struct cpufreq_policy *policy)
1da177e4 240{
d5aaffa9
DB
241 if (cpufreq_disabled())
242 return;
243
3a3e9e06 244 __cpufreq_cpu_put(policy, false);
1da177e4
LT
245}
246EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
247
3a3e9e06 248static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *policy)
a9144436 249{
3a3e9e06 250 __cpufreq_cpu_put(policy, true);
a9144436 251}
1da177e4 252
1da177e4
LT
253/*********************************************************************
254 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
255 *********************************************************************/
256
257/**
258 * adjust_jiffies - adjust the system "loops_per_jiffy"
259 *
260 * This function alters the system "loops_per_jiffy" for the clock
261 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 262 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
263 * per-CPU loops_per_jiffy value wherever possible.
264 */
#ifndef CONFIG_SMP
/* Baseline loops_per_jiffy and the frequency it was sampled at; set on
 * the first transition seen and used as the scaling reference after. */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	/* Drivers with constant loops (e.g. TSC-invariant delay) opt out. */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	/* First call: capture the reference lpj/frequency pair. */
	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	/* Rescale after a real change, or on suspend/resume fixups. */
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: per-CPU loops_per_jiffy is handled by the arch; nothing to do. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
294
0956df9c 295static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
b43a7ffb 296 struct cpufreq_freqs *freqs, unsigned int state)
1da177e4
LT
297{
298 BUG_ON(irqs_disabled());
299
d5aaffa9
DB
300 if (cpufreq_disabled())
301 return;
302
1c3d85dd 303 freqs->flags = cpufreq_driver->flags;
2d06d8c4 304 pr_debug("notification %u of frequency transition to %u kHz\n",
e4472cb3 305 state, freqs->new);
1da177e4 306
1da177e4 307 switch (state) {
e4472cb3 308
1da177e4 309 case CPUFREQ_PRECHANGE:
266c13d7
VK
310 if (WARN(policy->transition_ongoing ==
311 cpumask_weight(policy->cpus),
7c30ed53
VK
312 "In middle of another frequency transition\n"))
313 return;
314
266c13d7 315 policy->transition_ongoing++;
7c30ed53 316
32ee8c3e 317 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
318 * which is not equal to what the cpufreq core thinks is
319 * "old frequency".
1da177e4 320 */
1c3d85dd 321 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
322 if ((policy) && (policy->cpu == freqs->cpu) &&
323 (policy->cur) && (policy->cur != freqs->old)) {
2d06d8c4 324 pr_debug("Warning: CPU frequency is"
e4472cb3
DJ
325 " %u, cpufreq assumed %u kHz.\n",
326 freqs->old, policy->cur);
327 freqs->old = policy->cur;
1da177e4
LT
328 }
329 }
b4dfdbb3 330 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 331 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
332 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
333 break;
e4472cb3 334
1da177e4 335 case CPUFREQ_POSTCHANGE:
7c30ed53
VK
336 if (WARN(!policy->transition_ongoing,
337 "No frequency transition in progress\n"))
338 return;
339
266c13d7 340 policy->transition_ongoing--;
7c30ed53 341
1da177e4 342 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
2d06d8c4 343 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
6f4f2723 344 (unsigned long)freqs->cpu);
25e41933 345 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 346 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 347 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
348 if (likely(policy) && likely(policy->cpu == freqs->cpu))
349 policy->cur = freqs->new;
1da177e4
LT
350 break;
351 }
1da177e4 352}
bb176f7d 353
b43a7ffb
VK
354/**
355 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
356 * on frequency transition.
357 *
358 * This function calls the transition notifiers and the "adjust_jiffies"
359 * function. It is called twice on all CPU frequency changes that have
360 * external effects.
361 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	/* Fan out: notify once for every CPU governed by this policy. */
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
369
370
1da177e4
LT
371/*********************************************************************
372 * SYSFS INTERFACE *
373 *********************************************************************/
374
3bcb09a3
JF
/* Look up a registered governor by name (case-insensitive).  Returns
 * NULL if not found.  Callers generally hold cpufreq_governor_mutex
 * around the lookup - confirm at each call site. */
static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
385
1da177e4
LT
386/**
387 * cpufreq_parse_governor - parse a governor string
388 */
905d77cd 389static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
390 struct cpufreq_governor **governor)
391{
3bcb09a3 392 int err = -EINVAL;
1c3d85dd
RW
393
394 if (!cpufreq_driver)
3bcb09a3
JF
395 goto out;
396
1c3d85dd 397 if (cpufreq_driver->setpolicy) {
1da177e4
LT
398 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
399 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 400 err = 0;
e08f5f5b
GS
401 } else if (!strnicmp(str_governor, "powersave",
402 CPUFREQ_NAME_LEN)) {
1da177e4 403 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 404 err = 0;
1da177e4 405 }
1c3d85dd 406 } else if (cpufreq_driver->target) {
1da177e4 407 struct cpufreq_governor *t;
3bcb09a3 408
3fc54d37 409 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
410
411 t = __find_governor(str_governor);
412
ea714970 413 if (t == NULL) {
1a8e1463 414 int ret;
ea714970 415
1a8e1463
KC
416 mutex_unlock(&cpufreq_governor_mutex);
417 ret = request_module("cpufreq_%s", str_governor);
418 mutex_lock(&cpufreq_governor_mutex);
ea714970 419
1a8e1463
KC
420 if (ret == 0)
421 t = __find_governor(str_governor);
ea714970
JF
422 }
423
3bcb09a3
JF
424 if (t != NULL) {
425 *governor = t;
426 err = 0;
1da177e4 427 }
3bcb09a3 428
3fc54d37 429 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 430 }
29464f28 431out:
3bcb09a3 432 return err;
1da177e4 433}
1da177e4 434
1da177e4 435/**
e08f5f5b
GS
436 * cpufreq_per_cpu_attr_read() / show_##file_name() -
437 * print out cpufreq information
1da177e4
LT
438 *
439 * Write out information from cpufreq_driver->policy[cpu]; object must be
440 * "unsigned int".
441 */
442
32ee8c3e
DJ
/*
 * show_one(file_name, object) - expands to a sysfs show handler that
 * prints the unsigned int field policy->object followed by a newline.
 */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
456
3a3e9e06
VK
/* Forward declaration: defined later in this file. */
static int __cpufreq_set_policy(struct cpufreq_policy *policy,
		struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 *
 * Expands to a store handler that parses an unsigned int into
 * new_policy.object, applies it via __cpufreq_set_policy(), and then
 * records in user_policy the value that actually took effect.
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret;						\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
1da177e4
LT
486
487/**
488 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
489 */
905d77cd
DJ
490static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
491 char *buf)
1da177e4 492{
5a01f2e8 493 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
494 if (!cur_freq)
495 return sprintf(buf, "<unknown>");
496 return sprintf(buf, "%u\n", cur_freq);
497}
498
1da177e4
LT
499/**
500 * show_scaling_governor - show the current policy for the specified CPU
501 */
905d77cd 502static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 503{
29464f28 504 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
505 return sprintf(buf, "powersave\n");
506 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
507 return sprintf(buf, "performance\n");
508 else if (policy->governor)
4b972f0b 509 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 510 policy->governor->name);
1da177e4
LT
511 return -EINVAL;
512}
513
1da177e4
LT
514/**
515 * store_scaling_governor - store policy for the specified CPU
516 */
905d77cd
DJ
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	/* %15s: bounded read into str_governor[16]. */
	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/*
	 * Do not use cpufreq_set_policy here or the user_policy.max
	 * will be wrongly overridden
	 */
	ret = __cpufreq_set_policy(policy, &new_policy);

	/* Record what actually took effect, not what was requested. */
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
550
551/**
552 * show_scaling_driver - show the cpufreq driver currently loaded
553 */
905d77cd 554static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 555{
1c3d85dd 556 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
1da177e4
LT
557}
558
559/**
560 * show_scaling_available_governors - show the available CPUfreq governors
561 */
905d77cd
DJ
562static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
563 char *buf)
1da177e4
LT
564{
565 ssize_t i = 0;
566 struct cpufreq_governor *t;
567
1c3d85dd 568 if (!cpufreq_driver->target) {
1da177e4
LT
569 i += sprintf(buf, "performance powersave");
570 goto out;
571 }
572
573 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
574 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
575 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 576 goto out;
4b972f0b 577 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 578 }
7d5e350f 579out:
1da177e4
LT
580 i += sprintf(&buf[i], "\n");
581 return i;
582}
e8628dd0 583
f4fd3797 584ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
585{
586 ssize_t i = 0;
587 unsigned int cpu;
588
835481d9 589 for_each_cpu(cpu, mask) {
1da177e4
LT
590 if (i)
591 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
592 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
593 if (i >= (PAGE_SIZE - 5))
29464f28 594 break;
1da177e4
LT
595 }
596 i += sprintf(&buf[i], "\n");
597 return i;
598}
f4fd3797 599EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
1da177e4 600
e8628dd0
DW
601/**
602 * show_related_cpus - show the CPUs affected by each transition even if
603 * hw coordination is in use
604 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	/* related_cpus: all CPUs in the domain, incl. hw-coordinated ones. */
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
617
9e76988e 618static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 619 const char *buf, size_t count)
9e76988e
VP
620{
621 unsigned int freq = 0;
622 unsigned int ret;
623
879000f9 624 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
625 return -EINVAL;
626
627 ret = sscanf(buf, "%u", &freq);
628 if (ret != 1)
629 return -EINVAL;
630
631 policy->governor->store_setspeed(policy, freq);
632
633 return count;
634}
635
636static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
637{
879000f9 638 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
639 return sprintf(buf, "<unsupported>\n");
640
641 return policy->governor->show_setspeed(policy, buf);
642}
1da177e4 643
e2f74f35 644/**
8bf1ac72 645 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
646 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	/* Prefer the driver's live BIOS limit; fall back to cpuinfo max
	 * when the hook is absent or fails. */
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
658
6dad2a29
BP
659cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
660cpufreq_freq_attr_ro(cpuinfo_min_freq);
661cpufreq_freq_attr_ro(cpuinfo_max_freq);
662cpufreq_freq_attr_ro(cpuinfo_transition_latency);
663cpufreq_freq_attr_ro(scaling_available_governors);
664cpufreq_freq_attr_ro(scaling_driver);
665cpufreq_freq_attr_ro(scaling_cur_freq);
666cpufreq_freq_attr_ro(bios_limit);
667cpufreq_freq_attr_ro(related_cpus);
668cpufreq_freq_attr_ro(affected_cpus);
669cpufreq_freq_attr_rw(scaling_min_freq);
670cpufreq_freq_attr_rw(scaling_max_freq);
671cpufreq_freq_attr_rw(scaling_governor);
672cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 673
905d77cd 674static struct attribute *default_attrs[] = {
1da177e4
LT
675 &cpuinfo_min_freq.attr,
676 &cpuinfo_max_freq.attr,
ed129784 677 &cpuinfo_transition_latency.attr,
1da177e4
LT
678 &scaling_min_freq.attr,
679 &scaling_max_freq.attr,
680 &affected_cpus.attr,
e8628dd0 681 &related_cpus.attr,
1da177e4
LT
682 &scaling_governor.attr,
683 &scaling_driver.attr,
684 &scaling_available_governors.attr,
9e76988e 685 &scaling_setspeed.attr,
1da177e4
LT
686 NULL
687};
688
29464f28
DJ
689#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
690#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 691
29464f28 692static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 693{
905d77cd
DJ
694 struct cpufreq_policy *policy = to_policy(kobj);
695 struct freq_attr *fattr = to_attr(attr);
0db4a8a9 696 ssize_t ret = -EINVAL;
a9144436 697 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 698 if (!policy)
0db4a8a9 699 goto no_policy;
5a01f2e8
VP
700
701 if (lock_policy_rwsem_read(policy->cpu) < 0)
0db4a8a9 702 goto fail;
5a01f2e8 703
e08f5f5b
GS
704 if (fattr->show)
705 ret = fattr->show(policy, buf);
706 else
707 ret = -EIO;
708
5a01f2e8 709 unlock_policy_rwsem_read(policy->cpu);
0db4a8a9 710fail:
a9144436 711 cpufreq_cpu_put_sysfs(policy);
0db4a8a9 712no_policy:
1da177e4
LT
713 return ret;
714}
715
905d77cd
DJ
/* sysfs ->store dispatch: identical shape to show(), but takes the
 * policy rwsem in write mode around the attribute's store handler. */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
740
905d77cd 741static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 742{
905d77cd 743 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 744 pr_debug("last reference is dropped\n");
1da177e4
LT
745 complete(&policy->kobj_unregister);
746}
747
52cf25d0 748static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
749 .show = show,
750 .store = store,
751};
752
753static struct kobj_type ktype_cpufreq = {
754 .sysfs_ops = &sysfs_ops,
755 .default_attrs = default_attrs,
756 .release = cpufreq_sysfs_release,
757};
758
2361be23
VK
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* Usage count for the global kobject: added to sysfs on the first get,
 * deleted on the last put.  NOTE(review): updated without locking -
 * presumably callers serialize; confirm. */
static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

/* Create @attr under the global cpufreq kobject, holding one usage
 * reference for the file's lifetime (dropped on failure or removal). */
int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
801
19d6f7ec 802/* symlink affected CPUs */
308b60e7 803static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
19d6f7ec
DJ
804{
805 unsigned int j;
806 int ret = 0;
807
808 for_each_cpu(j, policy->cpus) {
8a25a2fd 809 struct device *cpu_dev;
19d6f7ec 810
308b60e7 811 if (j == policy->cpu)
19d6f7ec 812 continue;
19d6f7ec 813
e8fdde10 814 pr_debug("Adding link for CPU: %u\n", j);
8a25a2fd
KS
815 cpu_dev = get_cpu_device(j);
816 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
19d6f7ec 817 "cpufreq");
71c3461e
RW
818 if (ret)
819 break;
19d6f7ec
DJ
820 }
821 return ret;
822}
823
308b60e7 824static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
8a25a2fd 825 struct device *dev)
909a694e
DJ
826{
827 struct freq_attr **drv_attr;
909a694e 828 int ret = 0;
909a694e
DJ
829
830 /* prepare interface data */
831 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
8a25a2fd 832 &dev->kobj, "cpufreq");
909a694e
DJ
833 if (ret)
834 return ret;
835
836 /* set up files for this cpu device */
1c3d85dd 837 drv_attr = cpufreq_driver->attr;
909a694e
DJ
838 while ((drv_attr) && (*drv_attr)) {
839 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
840 if (ret)
1c3d85dd 841 goto err_out_kobj_put;
909a694e
DJ
842 drv_attr++;
843 }
1c3d85dd 844 if (cpufreq_driver->get) {
909a694e
DJ
845 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
846 if (ret)
1c3d85dd 847 goto err_out_kobj_put;
909a694e 848 }
1c3d85dd 849 if (cpufreq_driver->target) {
909a694e
DJ
850 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
851 if (ret)
1c3d85dd 852 goto err_out_kobj_put;
909a694e 853 }
1c3d85dd 854 if (cpufreq_driver->bios_limit) {
e2f74f35
TR
855 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
856 if (ret)
1c3d85dd 857 goto err_out_kobj_put;
e2f74f35 858 }
909a694e 859
308b60e7 860 ret = cpufreq_add_dev_symlink(policy);
ecf7e461
DJ
861 if (ret)
862 goto err_out_kobj_put;
863
e18f1682
SB
864 return ret;
865
866err_out_kobj_put:
867 kobject_put(&policy->kobj);
868 wait_for_completion(&policy->kobj_unregister);
869 return ret;
870}
871
/*
 * cpufreq_init_policy - apply the default policy to a freshly created
 * @policy and record the result in its user_policy shadow.  On failure
 * the driver's ->exit hook (if any) is invoked to undo ->init.
 */
static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}
892
fcf80582 893#ifdef CONFIG_HOTPLUG_CPU
d8d3b471
VK
894static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
895 unsigned int cpu, struct device *dev,
896 bool frozen)
fcf80582 897{
1c3d85dd 898 int ret = 0, has_target = !!cpufreq_driver->target;
fcf80582
VK
899 unsigned long flags;
900
820c6ca2
VK
901 if (has_target)
902 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
fcf80582 903
d8d3b471 904 lock_policy_rwsem_write(policy->cpu);
2eaa3e2d 905
0d1857a1 906 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 907
fcf80582 908 cpumask_set_cpu(cpu, policy->cpus);
2eaa3e2d 909 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
fcf80582 910 per_cpu(cpufreq_cpu_data, cpu) = policy;
0d1857a1 911 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 912
d8d3b471 913 unlock_policy_rwsem_write(policy->cpu);
2eaa3e2d 914
820c6ca2
VK
915 if (has_target) {
916 __cpufreq_governor(policy, CPUFREQ_GOV_START);
917 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
918 }
fcf80582 919
a82fab29 920 /* Don't touch sysfs links during light-weight init */
71c3461e
RW
921 if (!frozen)
922 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
a82fab29
SB
923
924 return ret;
fcf80582
VK
925}
926#endif
1da177e4 927
8414809c
SB
/*
 * cpufreq_policy_restore - fetch the policy saved in the fallback slot
 * when @cpu was last taken down (light-weight hotplug/resume path).
 *
 * NOTE(review): only reads the fallback pointer but takes the write
 * lock; a read lock would appear sufficient - confirm before changing.
 */
static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
941
e9698cc5
SB
942static struct cpufreq_policy *cpufreq_policy_alloc(void)
943{
944 struct cpufreq_policy *policy;
945
946 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
947 if (!policy)
948 return NULL;
949
950 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
951 goto err_free_policy;
952
953 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
954 goto err_free_cpumask;
955
c88a1f8b 956 INIT_LIST_HEAD(&policy->policy_list);
e9698cc5
SB
957 return policy;
958
959err_free_cpumask:
960 free_cpumask_var(policy->cpus);
961err_free_policy:
962 kfree(policy);
963
964 return NULL;
965}
966
/*
 * cpufreq_policy_free - release a policy allocated by cpufreq_policy_alloc().
 *
 * Unlinks the policy from cpufreq_policy_list under the driver lock and
 * frees its cpumasks and the structure itself.  The list head is
 * initialized in cpufreq_policy_alloc(), so list_del() is safe even for a
 * policy that was never added to the global list (error paths).
 */
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
979
a82fab29
SB
/*
 * __cpufreq_add_dev - core CPU-add path, shared by full and light-weight init.
 * @dev:    CPU device being added
 * @sif:    subsys interface (unused here, part of the callback signature)
 * @frozen: true for light-weight re-init after a suspend-style tear-down;
 *          restores the saved policy and skips sysfs work
 *
 * Returns 0 on success (including "nothing to do") or a negative errno.
 */
static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
			     bool frozen)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_policy *tpolicy;
	struct cpufreq_governor *gov;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
			/* Sibling policy exists: just link this CPU into it. */
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			return cpufreq_add_policy_cpu(tpolicy, cpu, dev,
						      frozen);
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif

	/* Pin the driver module for the duration of initialization. */
	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	if (frozen)
		/* Restore the saved policy when doing light-weight init */
		policy = cpufreq_policy_restore(cpu);
	else
		policy = cpufreq_policy_alloc();

	if (!policy)
		goto nomem_out;

	policy->cpu = cpu;
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	/* Driver ->init() filled in min/max; record them as the user policy. */
	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	/* Re-apply the governor this CPU used before it was unplugged. */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	/* Publish the policy for every CPU it manages. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!frozen) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	cpufreq_init_policy(policy);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	/* Undo the per-cpu publication done above. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = NULL;
		if (j != cpu)
			per_cpu(cpufreq_policy_cpu, j) = -1;
	}
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

err_set_policy_cpu:
	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	cpufreq_policy_free(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	return ret;
}
1119
a82fab29
SB
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	/* Full initialization: frozen=false means sysfs links are created. */
	return __cpufreq_add_dev(dev, sif, false);
}
1133
b8eed8af
VK
/*
 * update_policy_cpu - transfer policy ownership to a new managing CPU.
 * @policy: policy whose owner changes
 * @cpu:    CPU that becomes policy->cpu
 *
 * Records the previous owner in policy->last_cpu, redirects the per-cpu
 * policy_cpu pointers of all managed CPUs, and notifies listeners via
 * CPUFREQ_UPDATE_POLICY_CPU.
 */
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int j;

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
	/* Keep the frequency-table bookkeeping in sync with the new owner. */
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
1da177e4 1150
/*
 * cpufreq_nominate_new_policy_cpu - pick a sibling to take over the policy
 * when @old_cpu goes away.
 * @policy:  policy losing its managing CPU
 * @old_cpu: CPU being removed
 * @frozen:  light-weight tear-down; skip all sysfs manipulation
 *
 * Returns the id of the new owner CPU, or -EINVAL if the policy kobject
 * could not be moved (in which case old_cpu is restored as a member).
 */
static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
					   unsigned int old_cpu, bool frozen)
{
	struct device *cpu_dev;
	unsigned long flags;
	int ret;

	/* first sibling now owns the new sysfs dir */
	cpu_dev = get_cpu_device(cpumask_first(policy->cpus));

	/* Don't touch sysfs files during light-weight tear-down */
	if (frozen)
		return cpu_dev->id;

	/* The new owner had a symlink to the policy dir; replace it with
	 * the real kobject. */
	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d", __func__, ret);

		/* Roll back: re-add old_cpu to the policy and re-publish it. */
		WARN_ON(lock_policy_rwsem_write(old_cpu));
		cpumask_set_cpu(old_cpu, policy->cpus);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		per_cpu(cpufreq_cpu_data, old_cpu) = policy;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		unlock_policy_rwsem_write(old_cpu);

		/* Restore the symlink we removed above. */
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");

		return -EINVAL;
	}

	return cpu_dev->id;
}
1187
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 *
 * @frozen selects the light-weight tear-down: the policy is stashed in
 * cpufreq_cpu_data_fallback (for a later light-weight init) instead of
 * being freed, and sysfs is left untouched.
 */
static int __cpufreq_remove_dev(struct device *dev,
		struct subsys_interface *sif, bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int new_cpu;
	unsigned long flags;
	struct cpufreq_policy *policy;
	struct kobject *kobj;
	struct completion *cmp;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	/* Save the policy somewhere when doing a light-weight tear-down */
	if (frozen)
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (cpufreq_driver->target)
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
	/* Remember the governor so it can be restored on re-plug. */
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);
#endif

	WARN_ON(lock_policy_rwsem_write(cpu));
	cpus = cpumask_weight(policy->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, policy->cpus);
	unlock_policy_rwsem_write(cpu);

	if (cpu != policy->cpu && !frozen) {
		/* A non-owner CPU only has a symlink to remove. */
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {

		/* Owner is going away but siblings remain: hand over. */
		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
		if (new_cpu >= 0) {
			WARN_ON(lock_policy_rwsem_write(cpu));
			update_policy_cpu(policy, new_cpu);
			unlock_policy_rwsem_write(cpu);

			if (!frozen) {
				pr_debug("%s: policy Kobject moved to cpu: %d "
					 "from: %d\n",__func__, new_cpu, cpu);
			}
		}
	}

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (cpufreq_driver->target)
			__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);

		if (!frozen) {
			lock_policy_rwsem_read(cpu);
			kobj = &policy->kobj;
			cmp = &policy->kobj_unregister;
			unlock_policy_rwsem_read(cpu);
			kobject_put(kobj);

			/*
			 * We need to make sure that the underlying kobj is
			 * actually not referenced anymore by anybody before we
			 * proceed with unloading.
			 */
			pr_debug("waiting for dropping of refcount\n");
			wait_for_completion(cmp);
			pr_debug("wait complete\n");
		}

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		if (!frozen)
			cpufreq_policy_free(policy);
	} else {
		/* Siblings remain: restart the governor for them. */
		if (cpufreq_driver->target) {
			__cpufreq_governor(policy, CPUFREQ_GOV_START);
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	return 0;
}
1298
8a25a2fd 1299static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1300{
8a25a2fd 1301 unsigned int cpu = dev->id;
5a01f2e8 1302 int retval;
ec28297a
VP
1303
1304 if (cpu_is_offline(cpu))
1305 return 0;
1306
a82fab29 1307 retval = __cpufreq_remove_dev(dev, sif, false);
5a01f2e8
VP
1308 return retval;
1309}
1310
65f27f38 1311static void handle_update(struct work_struct *work)
1da177e4 1312{
65f27f38
DH
1313 struct cpufreq_policy *policy =
1314 container_of(work, struct cpufreq_policy, update);
1315 unsigned int cpu = policy->cpu;
2d06d8c4 1316 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1317 cpufreq_update_policy(cpu);
1318}
1319
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 *	in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* Fire both halves of a transition so listeners adopt new_freq. */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
1350
32ee8c3e 1351/**
4ab70df4 1352 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1353 * @cpu: CPU number
1354 *
1355 * This is the last known freq, without actually getting it from the driver.
1356 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1357 */
1358unsigned int cpufreq_quick_get(unsigned int cpu)
1359{
9e21ba8b 1360 struct cpufreq_policy *policy;
e08f5f5b 1361 unsigned int ret_freq = 0;
95235ca2 1362
1c3d85dd
RW
1363 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1364 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1365
1366 policy = cpufreq_cpu_get(cpu);
95235ca2 1367 if (policy) {
e08f5f5b 1368 ret_freq = policy->cur;
95235ca2
VP
1369 cpufreq_cpu_put(policy);
1370 }
1371
4d34a67d 1372 return ret_freq;
95235ca2
VP
1373}
1374EXPORT_SYMBOL(cpufreq_quick_get);
1375
3d737108
JB
1376/**
1377 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1378 * @cpu: CPU number
1379 *
1380 * Just return the max possible frequency for a given CPU.
1381 */
1382unsigned int cpufreq_quick_get_max(unsigned int cpu)
1383{
1384 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1385 unsigned int ret_freq = 0;
1386
1387 if (policy) {
1388 ret_freq = policy->max;
1389 cpufreq_cpu_put(policy);
1390 }
1391
1392 return ret_freq;
1393}
1394EXPORT_SYMBOL(cpufreq_quick_get_max);
1395
/*
 * __cpufreq_get - read the current frequency from the driver for @cpu and
 * reconcile it with the cached policy->cur.
 *
 * Called with the policy rwsem held for @cpu (cpufreq_get() takes the read
 * lock before calling in).  If the hardware value disagrees with the cached
 * one, a synthetic transition is emitted and an update is scheduled.
 */
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	/* CONST_LOOPS drivers skip the consistency check. */
	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
1da177e4 1418
5a01f2e8
VP
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (!policy)
		return ret_freq;

	/* Query the driver only if the per-policy lock could be taken. */
	if (!lock_policy_rwsem_read(cpu)) {
		ret_freq = __cpufreq_get(cpu);
		unlock_policy_rwsem_read(cpu);
	}

	cpufreq_cpu_put(policy);

	return ret_freq;
}
1445EXPORT_SYMBOL(cpufreq_get);
1446
8a25a2fd
KS
/* Hooks cpufreq into the CPU subsystem: add_dev/remove_dev run when CPU
 * devices are registered with or removed from cpu_subsys. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1453
/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 *
 * Returns 0, or the error from the driver's ->suspend() callback.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return 0;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", policy->cpu);
	}

	cpufreq_cpu_put(policy);
	return ret;
}
1484
/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *	    restored. It will verify that the current freq is in sync with
 *	    what we believe it to be. This is a bit later than when it
 *	    should be, but nonetheless it's better than calling
 *	    cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU.  The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", policy->cpu);
			goto fail;
		}
	}

	/* Defer the frequency consistency check to process context. */
	schedule_work(&policy->update);

fail:
	cpufreq_cpu_put(policy);
}
1526
e00e56df
RW
/* Syscore hooks: suspend/resume the boot CPU's cpufreq state with the
 * rest of the core (non-boot CPUs are handled via hotplug). */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1531
9d95046e
BP
1532/**
1533 * cpufreq_get_current_driver - return current driver's name
1534 *
1535 * Return the name string of the currently loaded cpufreq driver
1536 * or NULL, if none.
1537 */
1538const char *cpufreq_get_current_driver(void)
1539{
1c3d85dd
RW
1540 if (cpufreq_driver)
1541 return cpufreq_driver->name;
1542
1543 return NULL;
9d95046e
BP
1544}
1545EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1546
1547/*********************************************************************
1548 * NOTIFIER LISTS INTERFACE *
1549 *********************************************************************/
1550
1551/**
1552 * cpufreq_register_notifier - register a driver with cpufreq
1553 * @nb: notifier function to register
1554 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1555 *
32ee8c3e 1556 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1557 * are notified about clock rate changes (once before and once after
1558 * the transition), or a list of drivers that are notified about
1559 * changes in cpufreq policy.
1560 *
1561 * This function may sleep, and has the same return conditions as
e041c683 1562 * blocking_notifier_chain_register.
1da177e4
LT
1563 */
1564int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1565{
1566 int ret;
1567
d5aaffa9
DB
1568 if (cpufreq_disabled())
1569 return -EINVAL;
1570
74212ca4
CEB
1571 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1572
1da177e4
LT
1573 switch (list) {
1574 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1575 ret = srcu_notifier_chain_register(
e041c683 1576 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1577 break;
1578 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1579 ret = blocking_notifier_chain_register(
1580 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1581 break;
1582 default:
1583 ret = -EINVAL;
1584 }
1da177e4
LT
1585
1586 return ret;
1587}
1588EXPORT_SYMBOL(cpufreq_register_notifier);
1589
1da177e4
LT
1590/**
1591 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1592 * @nb: notifier block to be unregistered
bb176f7d 1593 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1594 *
1595 * Remove a driver from the CPU frequency notifier list.
1596 *
1597 * This function may sleep, and has the same return conditions as
e041c683 1598 * blocking_notifier_chain_unregister.
1da177e4
LT
1599 */
1600int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1601{
1602 int ret;
1603
d5aaffa9
DB
1604 if (cpufreq_disabled())
1605 return -EINVAL;
1606
1da177e4
LT
1607 switch (list) {
1608 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1609 ret = srcu_notifier_chain_unregister(
e041c683 1610 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1611 break;
1612 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1613 ret = blocking_notifier_chain_unregister(
1614 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1615 break;
1616 default:
1617 ret = -EINVAL;
1618 }
1da177e4
LT
1619
1620 return ret;
1621}
1622EXPORT_SYMBOL(cpufreq_unregister_notifier);
1623
1624
1625/*********************************************************************
1626 * GOVERNORS *
1627 *********************************************************************/
1628
1da177e4
LT
/*
 * __cpufreq_driver_target - ask the driver to switch @policy to @target_freq.
 * @policy:      policy to act on (caller holds its rwsem in write mode,
 *               see cpufreq_driver_target())
 * @target_freq: requested frequency in kHz; clamped to [policy->min, max]
 * @relation:    CPUFREQ_RELATION_* rounding hint for the driver
 *
 * Returns 0 on success or if the target equals the current frequency,
 * -ENODEV/-EBUSY for disabled or transitioning cpufreq, -EINVAL if the
 * driver has no ->target callback, or the driver's error code.
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;
	/* Refuse to stack a request on top of an in-flight transition. */
	if (policy->transition_ongoing)
		return -EBUSY;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
			policy->cpu, target_freq, relation, old_target_freq);

	/* Already there: nothing to do. */
	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
1658EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1659
1da177e4
LT
1660int cpufreq_driver_target(struct cpufreq_policy *policy,
1661 unsigned int target_freq,
1662 unsigned int relation)
1663{
f1829e4a 1664 int ret = -EINVAL;
1da177e4 1665
5a01f2e8 1666 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1667 goto fail;
1da177e4
LT
1668
1669 ret = __cpufreq_driver_target(policy, target_freq, relation);
1670
5a01f2e8 1671 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1672
f1829e4a 1673fail:
1da177e4
LT
1674 return ret;
1675}
1676EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1677
153d7f3f 1678/*
153d7f3f
AV
1679 * when "event" is CPUFREQ_GOV_LIMITS
1680 */
1da177e4 1681
e08f5f5b
GS
/*
 * __cpufreq_governor - dispatch a governor event for @policy.
 * @policy: policy whose governor receives @event
 * @event:  CPUFREQ_GOV_{POLICY_INIT,START,STOP,LIMITS,POLICY_EXIT}
 *
 * Module refcounting contract: a reference on the governor module is taken
 * at POLICY_INIT and held until POLICY_EXIT succeeds; it is dropped again
 * immediately if POLICY_INIT itself fails or is rejected.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* Governor cannot cope with this hardware's transition latency:
	 * fall back to the performance governor if it is built in. */
	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	/* Pin the governor module across its INIT..EXIT lifetime. */
	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);

	/* Reject redundant START/STOP transitions. */
	mutex_lock(&cpufreq_governor_lock);
	if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
	    (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
		mutex_unlock(&cpufreq_governor_lock);
		/* Release the reference taken above before bailing out. */
		if (event == CPUFREQ_GOV_POLICY_INIT)
			module_put(policy->governor->owner);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	/* Drop the module ref on failed INIT or successful EXIT. */
	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}
1758
1da177e4
LT
/*
 * cpufreq_register_governor - add @governor to the global governor list.
 *
 * Returns 0 on success, -EINVAL for a NULL governor, -ENODEV when cpufreq
 * is disabled, or -EBUSY if a governor with the same name already exists.
 */
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	/* Names must be unique within cpufreq_governor_list. */
	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
1781EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1782
1da177e4
LT
/*
 * cpufreq_unregister_governor - remove @governor from the global list.
 *
 * Also clears any saved per-cpu governor name referencing it for CPUs that
 * are present but offline, so a later hotplug does not try to restore a
 * governor that no longer exists.
 */
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		/* Online CPUs hold live policies; only scrub offline ones. */
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
1809EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1810
1811
1da177e4
LT
1812/*********************************************************************
1813 * POLICY INTERFACE *
1814 *********************************************************************/
1815
1816/**
1817 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1818 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1819 * is written
1da177e4
LT
1820 *
1821 * Reads the current cpufreq policy.
1822 */
1823int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1824{
1825 struct cpufreq_policy *cpu_policy;
1826 if (!policy)
1827 return -EINVAL;
1828
1829 cpu_policy = cpufreq_cpu_get(cpu);
1830 if (!cpu_policy)
1831 return -EINVAL;
1832
d5b73cd8 1833 memcpy(policy, cpu_policy, sizeof(*policy));
1da177e4
LT
1834
1835 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1836 return 0;
1837}
1838EXPORT_SYMBOL(cpufreq_get_policy);
1839
/*
 * __cpufreq_set_policy - apply @new_policy on top of the live @policy.
 * policy     : current policy.
 * new_policy : policy to be set.
 *
 * Called with the policy rwsem held in write mode for new_policy->cpu
 * (the lock is temporarily dropped around governor POLICY_EXIT/INIT calls
 * below).  Runs the verify/adjust/notify sequence, then either programs
 * the driver directly (->setpolicy) or switches/updates the governor.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
		new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/* Requested range must overlap the currently allowed range. */
	if (new_policy->min > policy->max || new_policy->max < policy->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(new_policy);
	} else {
		if (new_policy->governor != policy->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = policy->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (policy->governor) {
				__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
				/* NOTE(review): rwsem is dropped around
				 * POLICY_EXIT — presumably to avoid deadlock
				 * with governor teardown; confirm against the
				 * lock ordering rules. */
				unlock_policy_rwsem_write(new_policy->cpu);
				__cpufreq_governor(policy,
						CPUFREQ_GOV_POLICY_EXIT);
				lock_policy_rwsem_write(new_policy->cpu);
			}

			/* start new governor */
			policy->governor = new_policy->governor;
			if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					/* START failed: tear the new governor
					 * back down (lock dropped as above). */
					unlock_policy_rwsem_write(new_policy->cpu);
					__cpufreq_governor(policy,
							CPUFREQ_GOV_POLICY_EXIT);
					lock_policy_rwsem_write(new_policy->cpu);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							policy->governor->name);
				if (old_gov) {
					policy->governor = old_gov;
					__cpufreq_governor(policy,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(policy,
							CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
1946
1da177e4
LT
1947/**
1948 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1949 * @cpu: CPU which shall be re-evaluated
1950 *
25985edc 1951 * Useful for policy notifiers which have different necessities
1da177e4
LT
1952 * at different times.
1953 */
1954int cpufreq_update_policy(unsigned int cpu)
1955{
3a3e9e06
VK
1956 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1957 struct cpufreq_policy new_policy;
f1829e4a 1958 int ret;
1da177e4 1959
3a3e9e06 1960 if (!policy) {
f1829e4a
JL
1961 ret = -ENODEV;
1962 goto no_policy;
1963 }
1da177e4 1964
f1829e4a
JL
1965 if (unlikely(lock_policy_rwsem_write(cpu))) {
1966 ret = -EINVAL;
1967 goto fail;
1968 }
1da177e4 1969
2d06d8c4 1970 pr_debug("updating policy for CPU %u\n", cpu);
d5b73cd8 1971 memcpy(&new_policy, policy, sizeof(*policy));
3a3e9e06
VK
1972 new_policy.min = policy->user_policy.min;
1973 new_policy.max = policy->user_policy.max;
1974 new_policy.policy = policy->user_policy.policy;
1975 new_policy.governor = policy->user_policy.governor;
1da177e4 1976
bb176f7d
VK
1977 /*
1978 * BIOS might change freq behind our back
1979 * -> ask driver for current freq and notify governors about a change
1980 */
1c3d85dd 1981 if (cpufreq_driver->get) {
3a3e9e06
VK
1982 new_policy.cur = cpufreq_driver->get(cpu);
1983 if (!policy->cur) {
2d06d8c4 1984 pr_debug("Driver did not initialize current freq");
3a3e9e06 1985 policy->cur = new_policy.cur;
a85f7bd3 1986 } else {
3a3e9e06
VK
1987 if (policy->cur != new_policy.cur && cpufreq_driver->target)
1988 cpufreq_out_of_sync(cpu, policy->cur,
1989 new_policy.cur);
a85f7bd3 1990 }
0961dd0d
TR
1991 }
1992
3a3e9e06 1993 ret = __cpufreq_set_policy(policy, &new_policy);
1da177e4 1994
5a01f2e8
VP
1995 unlock_policy_rwsem_write(cpu);
1996
f1829e4a 1997fail:
3a3e9e06 1998 cpufreq_cpu_put(policy);
f1829e4a 1999no_policy:
1da177e4
LT
2000 return ret;
2001}
2002EXPORT_SYMBOL(cpufreq_update_policy);
2003
2760984f 2004static int cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
2005 unsigned long action, void *hcpu)
2006{
2007 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 2008 struct device *dev;
5302c3fb 2009 bool frozen = false;
c32b6b8e 2010
8a25a2fd
KS
2011 dev = get_cpu_device(cpu);
2012 if (dev) {
5302c3fb
SB
2013
2014 if (action & CPU_TASKS_FROZEN)
2015 frozen = true;
2016
2017 switch (action & ~CPU_TASKS_FROZEN) {
c32b6b8e 2018 case CPU_ONLINE:
5302c3fb 2019 __cpufreq_add_dev(dev, NULL, frozen);
23d32899 2020 cpufreq_update_policy(cpu);
c32b6b8e 2021 break;
5302c3fb 2022
c32b6b8e 2023 case CPU_DOWN_PREPARE:
5302c3fb 2024 __cpufreq_remove_dev(dev, NULL, frozen);
c32b6b8e 2025 break;
5302c3fb 2026
5a01f2e8 2027 case CPU_DOWN_FAILED:
5302c3fb 2028 __cpufreq_add_dev(dev, NULL, frozen);
c32b6b8e
AR
2029 break;
2030 }
2031 }
2032 return NOTIFY_OK;
2033}
2034
/* Hotplug notifier: keeps per-CPU cpufreq state in sync with CPU on/offlining. */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
2038
2039/*********************************************************************
2040 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2041 *********************************************************************/
2042
2043/**
2044 * cpufreq_register_driver - register a CPU Frequency driver
2045 * @driver_data: A struct cpufreq_driver containing the values#
2046 * submitted by the CPU Frequency driver.
2047 *
bb176f7d 2048 * Registers a CPU Frequency driver to this core code. This code
1da177e4 2049 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 2050 * (and isn't unregistered in the meantime).
1da177e4
LT
2051 *
2052 */
221dee28 2053int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1da177e4
LT
2054{
2055 unsigned long flags;
2056 int ret;
2057
a7b422cd
KRW
2058 if (cpufreq_disabled())
2059 return -ENODEV;
2060
1da177e4
LT
2061 if (!driver_data || !driver_data->verify || !driver_data->init ||
2062 ((!driver_data->setpolicy) && (!driver_data->target)))
2063 return -EINVAL;
2064
2d06d8c4 2065 pr_debug("trying to register driver %s\n", driver_data->name);
1da177e4
LT
2066
2067 if (driver_data->setpolicy)
2068 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2069
0d1857a1 2070 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2071 if (cpufreq_driver) {
0d1857a1 2072 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
2073 return -EBUSY;
2074 }
1c3d85dd 2075 cpufreq_driver = driver_data;
0d1857a1 2076 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 2077
8a25a2fd 2078 ret = subsys_interface_register(&cpufreq_interface);
8f5bc2ab
JS
2079 if (ret)
2080 goto err_null_driver;
1da177e4 2081
1c3d85dd 2082 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1da177e4
LT
2083 int i;
2084 ret = -ENODEV;
2085
2086 /* check for at least one working CPU */
7a6aedfa
MT
2087 for (i = 0; i < nr_cpu_ids; i++)
2088 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1da177e4 2089 ret = 0;
7a6aedfa
MT
2090 break;
2091 }
1da177e4
LT
2092
2093 /* if all ->init() calls failed, unregister */
2094 if (ret) {
2d06d8c4 2095 pr_debug("no CPU initialized for driver %s\n",
e08f5f5b 2096 driver_data->name);
8a25a2fd 2097 goto err_if_unreg;
1da177e4
LT
2098 }
2099 }
2100
8f5bc2ab 2101 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2d06d8c4 2102 pr_debug("driver %s up and running\n", driver_data->name);
1da177e4 2103
8f5bc2ab 2104 return 0;
8a25a2fd
KS
2105err_if_unreg:
2106 subsys_interface_unregister(&cpufreq_interface);
8f5bc2ab 2107err_null_driver:
0d1857a1 2108 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2109 cpufreq_driver = NULL;
0d1857a1 2110 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4d34a67d 2111 return ret;
1da177e4
LT
2112}
2113EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2114
1da177e4
LT
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	/* caller must pass the exact driver that is registered */
	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* tear down sysfs interface and hotplug handling first ... */
	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	/* ... then drop the driver pointer under the write lock */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2142
2143static int __init cpufreq_core_init(void)
2144{
2145 int cpu;
2146
a7b422cd
KRW
2147 if (cpufreq_disabled())
2148 return -ENODEV;
2149
5a01f2e8 2150 for_each_possible_cpu(cpu) {
f1625066 2151 per_cpu(cpufreq_policy_cpu, cpu) = -1;
5a01f2e8
VP
2152 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2153 }
8aa84ad8 2154
2361be23 2155 cpufreq_global_kobject = kobject_create();
8aa84ad8 2156 BUG_ON(!cpufreq_global_kobject);
e00e56df 2157 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 2158
5a01f2e8
VP
2159 return 0;
2160}
5a01f2e8 2161core_initcall(cpufreq_core_init);
This page took 0.799224 seconds and 5 git commands to generate.