cpufreq: Simplify userspace governor
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
72a4ce34 20#include <asm/cputime.h>
1da177e4 21#include <linux/kernel.h>
72a4ce34 22#include <linux/kernel_stat.h>
1da177e4
LT
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/notifier.h>
26#include <linux/cpufreq.h>
27#include <linux/delay.h>
28#include <linux/interrupt.h>
29#include <linux/spinlock.h>
72a4ce34 30#include <linux/tick.h>
1da177e4
LT
31#include <linux/device.h>
32#include <linux/slab.h>
33#include <linux/cpu.h>
34#include <linux/completion.h>
3fc54d37 35#include <linux/mutex.h>
e00e56df 36#include <linux/syscore_ops.h>
1da177e4 37
6f4f2723
TR
38#include <trace/events/power.h>
39
1da177e4 40/**
cd878479 41 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
42 * level driver of CPUFreq support, and its spinlock. This lock
43 * also protects the cpufreq_cpu_data array.
44 */
1c3d85dd 45static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 46static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
47#ifdef CONFIG_HOTPLUG_CPU
48/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 49static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 50#endif
0d1857a1 51static DEFINE_RWLOCK(cpufreq_driver_lock);
1da177e4 52
5a01f2e8
VP
53/*
54 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
55 * all cpufreq/hotplug/workqueue/etc related lock issues.
56 *
57 * The rules for this semaphore:
58 * - Any routine that wants to read from the policy structure will
59 * do a down_read on this semaphore.
60 * - Any routine that will write to the policy structure and/or may take away
61 * the policy altogether (eg. CPU hotplug), will hold this lock in write
62 * mode before doing so.
63 *
64 * Additional rules:
5a01f2e8
VP
65 * - Governor routines that can be called in cpufreq hotplug path should not
66 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
67 * - Lock should not be held across
68 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 69 */
f1625066 70static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
71static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
72
73#define lock_policy_rwsem(mode, cpu) \
fa1d8af4 74static int lock_policy_rwsem_##mode(int cpu) \
5a01f2e8 75{ \
f1625066 76 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
5a01f2e8
VP
77 BUG_ON(policy_cpu == -1); \
78 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8
VP
79 \
80 return 0; \
81}
82
83lock_policy_rwsem(read, cpu);
5a01f2e8 84lock_policy_rwsem(write, cpu);
5a01f2e8 85
fa1d8af4
VK
86#define unlock_policy_rwsem(mode, cpu) \
87static void unlock_policy_rwsem_##mode(int cpu) \
88{ \
89 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
90 BUG_ON(policy_cpu == -1); \
91 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8 92}
5a01f2e8 93
fa1d8af4
VK
94unlock_policy_rwsem(read, cpu);
95unlock_policy_rwsem(write, cpu);
5a01f2e8 96
1da177e4 97/* internal prototypes */
29464f28
DJ
98static int __cpufreq_governor(struct cpufreq_policy *policy,
99 unsigned int event);
5a01f2e8 100static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 101static void handle_update(struct work_struct *work);
1da177e4
LT
102
103/**
32ee8c3e
DJ
104 * Two notifier lists: the "policy" list is involved in the
105 * validation process for a new CPU frequency policy; the
1da177e4
LT
106 * "transition" list for kernel code that needs to handle
107 * changes to devices when the CPU clock speed changes.
108 * The mutex locks both lists.
109 */
e041c683 110static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 111static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 112
74212ca4 113static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
114static int __init init_cpufreq_transition_notifier_list(void)
115{
116 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 117 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
118 return 0;
119}
b3438f82 120pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 121
a7b422cd 122static int off __read_mostly;
da584455 123static int cpufreq_disabled(void)
a7b422cd
KRW
124{
125 return off;
126}
127void disable_cpufreq(void)
128{
129 off = 1;
130}
1da177e4 131static LIST_HEAD(cpufreq_governor_list);
29464f28 132static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 133
4d5dcc42
VK
134bool have_governor_per_policy(void)
135{
1c3d85dd 136 return cpufreq_driver->have_governor_per_policy;
4d5dcc42 137}
3f869d6d 138EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 139
944e9a03
VK
140struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
141{
142 if (have_governor_per_policy())
143 return &policy->kobj;
144 else
145 return cpufreq_global_kobject;
146}
147EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
148
72a4ce34
VK
149static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
150{
151 u64 idle_time;
152 u64 cur_wall_time;
153 u64 busy_time;
154
155 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
156
157 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
158 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
159 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
160 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
161 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
162 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
163
164 idle_time = cur_wall_time - busy_time;
165 if (wall)
166 *wall = cputime_to_usecs(cur_wall_time);
167
168 return cputime_to_usecs(idle_time);
169}
170
171u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
172{
173 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
174
175 if (idle_time == -1ULL)
176 return get_cpu_idle_time_jiffy(cpu, wall);
177 else if (!io_busy)
178 idle_time += get_cpu_iowait_time_us(cpu, wall);
179
180 return idle_time;
181}
182EXPORT_SYMBOL_GPL(get_cpu_idle_time);
183
a9144436 184static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
1da177e4
LT
185{
186 struct cpufreq_policy *data;
187 unsigned long flags;
188
7a6aedfa 189 if (cpu >= nr_cpu_ids)
1da177e4
LT
190 goto err_out;
191
192 /* get the cpufreq driver */
1c3d85dd 193 read_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4 194
1c3d85dd 195 if (!cpufreq_driver)
1da177e4
LT
196 goto err_out_unlock;
197
1c3d85dd 198 if (!try_module_get(cpufreq_driver->owner))
1da177e4
LT
199 goto err_out_unlock;
200
201
202 /* get the CPU */
7a6aedfa 203 data = per_cpu(cpufreq_cpu_data, cpu);
1da177e4
LT
204
205 if (!data)
206 goto err_out_put_module;
207
a9144436 208 if (!sysfs && !kobject_get(&data->kobj))
1da177e4
LT
209 goto err_out_put_module;
210
0d1857a1 211 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
212 return data;
213
7d5e350f 214err_out_put_module:
1c3d85dd 215 module_put(cpufreq_driver->owner);
5800043b 216err_out_unlock:
1c3d85dd 217 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
7d5e350f 218err_out:
1da177e4
LT
219 return NULL;
220}
a9144436
SB
221
222struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
223{
d5aaffa9
DB
224 if (cpufreq_disabled())
225 return NULL;
226
a9144436
SB
227 return __cpufreq_cpu_get(cpu, false);
228}
1da177e4
LT
229EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
230
a9144436
SB
231static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
232{
233 return __cpufreq_cpu_get(cpu, true);
234}
235
236static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
237{
238 if (!sysfs)
239 kobject_put(&data->kobj);
1c3d85dd 240 module_put(cpufreq_driver->owner);
a9144436 241}
7d5e350f 242
1da177e4
LT
243void cpufreq_cpu_put(struct cpufreq_policy *data)
244{
d5aaffa9
DB
245 if (cpufreq_disabled())
246 return;
247
a9144436 248 __cpufreq_cpu_put(data, false);
1da177e4
LT
249}
250EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
251
a9144436
SB
252static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
253{
254 __cpufreq_cpu_put(data, true);
255}
1da177e4 256
1da177e4
LT
257/*********************************************************************
258 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
259 *********************************************************************/
260
261/**
262 * adjust_jiffies - adjust the system "loops_per_jiffy"
263 *
264 * This function alters the system "loops_per_jiffy" for the clock
265 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 266 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
267 * per-CPU loops_per_jiffy value wherever possible.
268 */
269#ifndef CONFIG_SMP
270static unsigned long l_p_j_ref;
271static unsigned int l_p_j_ref_freq;
272
858119e1 273static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4
LT
274{
275 if (ci->flags & CPUFREQ_CONST_LOOPS)
276 return;
277
278 if (!l_p_j_ref_freq) {
279 l_p_j_ref = loops_per_jiffy;
280 l_p_j_ref_freq = ci->old;
2d06d8c4 281 pr_debug("saving %lu as reference value for loops_per_jiffy; "
e08f5f5b 282 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
1da177e4 283 }
d08de0c1 284 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
42d4dc3f 285 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
e08f5f5b
GS
286 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
287 ci->new);
2d06d8c4 288 pr_debug("scaling loops_per_jiffy to %lu "
e08f5f5b 289 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
1da177e4
LT
290 }
291}
292#else
e08f5f5b
GS
293static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
294{
295 return;
296}
1da177e4
LT
297#endif
298
299
b43a7ffb
VK
300void __cpufreq_notify_transition(struct cpufreq_policy *policy,
301 struct cpufreq_freqs *freqs, unsigned int state)
1da177e4
LT
302{
303 BUG_ON(irqs_disabled());
304
d5aaffa9
DB
305 if (cpufreq_disabled())
306 return;
307
1c3d85dd 308 freqs->flags = cpufreq_driver->flags;
2d06d8c4 309 pr_debug("notification %u of frequency transition to %u kHz\n",
e4472cb3 310 state, freqs->new);
1da177e4 311
1da177e4 312 switch (state) {
e4472cb3 313
1da177e4 314 case CPUFREQ_PRECHANGE:
32ee8c3e 315 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
316 * which is not equal to what the cpufreq core thinks is
317 * "old frequency".
1da177e4 318 */
1c3d85dd 319 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
320 if ((policy) && (policy->cpu == freqs->cpu) &&
321 (policy->cur) && (policy->cur != freqs->old)) {
2d06d8c4 322 pr_debug("Warning: CPU frequency is"
e4472cb3
DJ
323 " %u, cpufreq assumed %u kHz.\n",
324 freqs->old, policy->cur);
325 freqs->old = policy->cur;
1da177e4
LT
326 }
327 }
b4dfdbb3 328 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 329 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
330 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
331 break;
e4472cb3 332
1da177e4
LT
333 case CPUFREQ_POSTCHANGE:
334 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
2d06d8c4 335 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
6f4f2723 336 (unsigned long)freqs->cpu);
25e41933 337 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 338 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 339 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
340 if (likely(policy) && likely(policy->cpu == freqs->cpu))
341 policy->cur = freqs->new;
1da177e4
LT
342 break;
343 }
1da177e4 344}
b43a7ffb
VK
345/**
346 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
347 * on frequency transition.
348 *
349 * This function calls the transition notifiers and the "adjust_jiffies"
350 * function. It is called twice on all CPU frequency changes that have
351 * external effects.
352 */
353void cpufreq_notify_transition(struct cpufreq_policy *policy,
354 struct cpufreq_freqs *freqs, unsigned int state)
355{
356 for_each_cpu(freqs->cpu, policy->cpus)
357 __cpufreq_notify_transition(policy, freqs, state);
358}
1da177e4
LT
359EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
360
361
362
363/*********************************************************************
364 * SYSFS INTERFACE *
365 *********************************************************************/
366
3bcb09a3
JF
367static struct cpufreq_governor *__find_governor(const char *str_governor)
368{
369 struct cpufreq_governor *t;
370
371 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 372 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
373 return t;
374
375 return NULL;
376}
377
1da177e4
LT
378/**
379 * cpufreq_parse_governor - parse a governor string
380 */
905d77cd 381static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
382 struct cpufreq_governor **governor)
383{
3bcb09a3 384 int err = -EINVAL;
1c3d85dd
RW
385
386 if (!cpufreq_driver)
3bcb09a3
JF
387 goto out;
388
1c3d85dd 389 if (cpufreq_driver->setpolicy) {
1da177e4
LT
390 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
391 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 392 err = 0;
e08f5f5b
GS
393 } else if (!strnicmp(str_governor, "powersave",
394 CPUFREQ_NAME_LEN)) {
1da177e4 395 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 396 err = 0;
1da177e4 397 }
1c3d85dd 398 } else if (cpufreq_driver->target) {
1da177e4 399 struct cpufreq_governor *t;
3bcb09a3 400
3fc54d37 401 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
402
403 t = __find_governor(str_governor);
404
ea714970 405 if (t == NULL) {
1a8e1463 406 int ret;
ea714970 407
1a8e1463
KC
408 mutex_unlock(&cpufreq_governor_mutex);
409 ret = request_module("cpufreq_%s", str_governor);
410 mutex_lock(&cpufreq_governor_mutex);
ea714970 411
1a8e1463
KC
412 if (ret == 0)
413 t = __find_governor(str_governor);
ea714970
JF
414 }
415
3bcb09a3
JF
416 if (t != NULL) {
417 *governor = t;
418 err = 0;
1da177e4 419 }
3bcb09a3 420
3fc54d37 421 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 422 }
29464f28 423out:
3bcb09a3 424 return err;
1da177e4 425}
1da177e4
LT
426
427
1da177e4 428/**
e08f5f5b
GS
429 * cpufreq_per_cpu_attr_read() / show_##file_name() -
430 * print out cpufreq information
1da177e4
LT
431 *
432 * Write out information from cpufreq_driver->policy[cpu]; object must be
433 * "unsigned int".
434 */
435
32ee8c3e
DJ
436#define show_one(file_name, object) \
437static ssize_t show_##file_name \
905d77cd 438(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 439{ \
29464f28 440 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
441}
442
443show_one(cpuinfo_min_freq, cpuinfo.min_freq);
444show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 445show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
446show_one(scaling_min_freq, min);
447show_one(scaling_max_freq, max);
448show_one(scaling_cur_freq, cur);
449
e08f5f5b
GS
450static int __cpufreq_set_policy(struct cpufreq_policy *data,
451 struct cpufreq_policy *policy);
7970e08b 452
1da177e4
LT
453/**
454 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
455 */
456#define store_one(file_name, object) \
457static ssize_t store_##file_name \
905d77cd 458(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 459{ \
f55c9c26 460 unsigned int ret; \
1da177e4
LT
461 struct cpufreq_policy new_policy; \
462 \
463 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
464 if (ret) \
465 return -EINVAL; \
466 \
29464f28 467 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
468 if (ret != 1) \
469 return -EINVAL; \
470 \
7970e08b
TR
471 ret = __cpufreq_set_policy(policy, &new_policy); \
472 policy->user_policy.object = policy->object; \
1da177e4
LT
473 \
474 return ret ? ret : count; \
475}
476
29464f28
DJ
477store_one(scaling_min_freq, min);
478store_one(scaling_max_freq, max);
1da177e4
LT
479
480/**
481 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
482 */
905d77cd
DJ
483static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
484 char *buf)
1da177e4 485{
5a01f2e8 486 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
487 if (!cur_freq)
488 return sprintf(buf, "<unknown>");
489 return sprintf(buf, "%u\n", cur_freq);
490}
491
492
493/**
494 * show_scaling_governor - show the current policy for the specified CPU
495 */
905d77cd 496static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 497{
29464f28 498 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
499 return sprintf(buf, "powersave\n");
500 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
501 return sprintf(buf, "performance\n");
502 else if (policy->governor)
4b972f0b 503 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 504 policy->governor->name);
1da177e4
LT
505 return -EINVAL;
506}
507
508
509/**
510 * store_scaling_governor - store policy for the specified CPU
511 */
905d77cd
DJ
512static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
513 const char *buf, size_t count)
1da177e4 514{
f55c9c26 515 unsigned int ret;
1da177e4
LT
516 char str_governor[16];
517 struct cpufreq_policy new_policy;
518
519 ret = cpufreq_get_policy(&new_policy, policy->cpu);
520 if (ret)
521 return ret;
522
29464f28 523 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
524 if (ret != 1)
525 return -EINVAL;
526
e08f5f5b
GS
527 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
528 &new_policy.governor))
1da177e4
LT
529 return -EINVAL;
530
7970e08b
TR
531 /* Do not use cpufreq_set_policy here or the user_policy.max
532 will be wrongly overridden */
7970e08b
TR
533 ret = __cpufreq_set_policy(policy, &new_policy);
534
535 policy->user_policy.policy = policy->policy;
536 policy->user_policy.governor = policy->governor;
7970e08b 537
e08f5f5b
GS
538 if (ret)
539 return ret;
540 else
541 return count;
1da177e4
LT
542}
543
544/**
545 * show_scaling_driver - show the cpufreq driver currently loaded
546 */
905d77cd 547static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 548{
1c3d85dd 549 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
1da177e4
LT
550}
551
552/**
553 * show_scaling_available_governors - show the available CPUfreq governors
554 */
905d77cd
DJ
555static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
556 char *buf)
1da177e4
LT
557{
558 ssize_t i = 0;
559 struct cpufreq_governor *t;
560
1c3d85dd 561 if (!cpufreq_driver->target) {
1da177e4
LT
562 i += sprintf(buf, "performance powersave");
563 goto out;
564 }
565
566 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
567 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
568 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 569 goto out;
4b972f0b 570 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 571 }
7d5e350f 572out:
1da177e4
LT
573 i += sprintf(&buf[i], "\n");
574 return i;
575}
e8628dd0 576
835481d9 577static ssize_t show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
578{
579 ssize_t i = 0;
580 unsigned int cpu;
581
835481d9 582 for_each_cpu(cpu, mask) {
1da177e4
LT
583 if (i)
584 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
585 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
586 if (i >= (PAGE_SIZE - 5))
29464f28 587 break;
1da177e4
LT
588 }
589 i += sprintf(&buf[i], "\n");
590 return i;
591}
592
e8628dd0
DW
593/**
594 * show_related_cpus - show the CPUs affected by each transition even if
595 * hw coordination is in use
596 */
597static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
598{
e8628dd0
DW
599 return show_cpus(policy->related_cpus, buf);
600}
601
602/**
603 * show_affected_cpus - show the CPUs affected by each transition
604 */
605static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
606{
607 return show_cpus(policy->cpus, buf);
608}
609
9e76988e 610static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 611 const char *buf, size_t count)
9e76988e
VP
612{
613 unsigned int freq = 0;
614 unsigned int ret;
615
879000f9 616 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
617 return -EINVAL;
618
619 ret = sscanf(buf, "%u", &freq);
620 if (ret != 1)
621 return -EINVAL;
622
623 policy->governor->store_setspeed(policy, freq);
624
625 return count;
626}
627
628static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
629{
879000f9 630 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
631 return sprintf(buf, "<unsupported>\n");
632
633 return policy->governor->show_setspeed(policy, buf);
634}
1da177e4 635
e2f74f35 636/**
8bf1ac72 637 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
638 */
639static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
640{
641 unsigned int limit;
642 int ret;
1c3d85dd
RW
643 if (cpufreq_driver->bios_limit) {
644 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
e2f74f35
TR
645 if (!ret)
646 return sprintf(buf, "%u\n", limit);
647 }
648 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
649}
650
6dad2a29
BP
651cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
652cpufreq_freq_attr_ro(cpuinfo_min_freq);
653cpufreq_freq_attr_ro(cpuinfo_max_freq);
654cpufreq_freq_attr_ro(cpuinfo_transition_latency);
655cpufreq_freq_attr_ro(scaling_available_governors);
656cpufreq_freq_attr_ro(scaling_driver);
657cpufreq_freq_attr_ro(scaling_cur_freq);
658cpufreq_freq_attr_ro(bios_limit);
659cpufreq_freq_attr_ro(related_cpus);
660cpufreq_freq_attr_ro(affected_cpus);
661cpufreq_freq_attr_rw(scaling_min_freq);
662cpufreq_freq_attr_rw(scaling_max_freq);
663cpufreq_freq_attr_rw(scaling_governor);
664cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 665
905d77cd 666static struct attribute *default_attrs[] = {
1da177e4
LT
667 &cpuinfo_min_freq.attr,
668 &cpuinfo_max_freq.attr,
ed129784 669 &cpuinfo_transition_latency.attr,
1da177e4
LT
670 &scaling_min_freq.attr,
671 &scaling_max_freq.attr,
672 &affected_cpus.attr,
e8628dd0 673 &related_cpus.attr,
1da177e4
LT
674 &scaling_governor.attr,
675 &scaling_driver.attr,
676 &scaling_available_governors.attr,
9e76988e 677 &scaling_setspeed.attr,
1da177e4
LT
678 NULL
679};
680
29464f28
DJ
681#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
682#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 683
29464f28 684static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 685{
905d77cd
DJ
686 struct cpufreq_policy *policy = to_policy(kobj);
687 struct freq_attr *fattr = to_attr(attr);
0db4a8a9 688 ssize_t ret = -EINVAL;
a9144436 689 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 690 if (!policy)
0db4a8a9 691 goto no_policy;
5a01f2e8
VP
692
693 if (lock_policy_rwsem_read(policy->cpu) < 0)
0db4a8a9 694 goto fail;
5a01f2e8 695
e08f5f5b
GS
696 if (fattr->show)
697 ret = fattr->show(policy, buf);
698 else
699 ret = -EIO;
700
5a01f2e8 701 unlock_policy_rwsem_read(policy->cpu);
0db4a8a9 702fail:
a9144436 703 cpufreq_cpu_put_sysfs(policy);
0db4a8a9 704no_policy:
1da177e4
LT
705 return ret;
706}
707
905d77cd
DJ
708static ssize_t store(struct kobject *kobj, struct attribute *attr,
709 const char *buf, size_t count)
1da177e4 710{
905d77cd
DJ
711 struct cpufreq_policy *policy = to_policy(kobj);
712 struct freq_attr *fattr = to_attr(attr);
a07530b4 713 ssize_t ret = -EINVAL;
a9144436 714 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 715 if (!policy)
a07530b4 716 goto no_policy;
5a01f2e8
VP
717
718 if (lock_policy_rwsem_write(policy->cpu) < 0)
a07530b4 719 goto fail;
5a01f2e8 720
e08f5f5b
GS
721 if (fattr->store)
722 ret = fattr->store(policy, buf, count);
723 else
724 ret = -EIO;
725
5a01f2e8 726 unlock_policy_rwsem_write(policy->cpu);
a07530b4 727fail:
a9144436 728 cpufreq_cpu_put_sysfs(policy);
a07530b4 729no_policy:
1da177e4
LT
730 return ret;
731}
732
905d77cd 733static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 734{
905d77cd 735 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 736 pr_debug("last reference is dropped\n");
1da177e4
LT
737 complete(&policy->kobj_unregister);
738}
739
52cf25d0 740static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
741 .show = show,
742 .store = store,
743};
744
745static struct kobj_type ktype_cpufreq = {
746 .sysfs_ops = &sysfs_ops,
747 .default_attrs = default_attrs,
748 .release = cpufreq_sysfs_release,
749};
750
2361be23
VK
751struct kobject *cpufreq_global_kobject;
752EXPORT_SYMBOL(cpufreq_global_kobject);
753
754static int cpufreq_global_kobject_usage;
755
756int cpufreq_get_global_kobject(void)
757{
758 if (!cpufreq_global_kobject_usage++)
759 return kobject_add(cpufreq_global_kobject,
760 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
761
762 return 0;
763}
764EXPORT_SYMBOL(cpufreq_get_global_kobject);
765
766void cpufreq_put_global_kobject(void)
767{
768 if (!--cpufreq_global_kobject_usage)
769 kobject_del(cpufreq_global_kobject);
770}
771EXPORT_SYMBOL(cpufreq_put_global_kobject);
772
773int cpufreq_sysfs_create_file(const struct attribute *attr)
774{
775 int ret = cpufreq_get_global_kobject();
776
777 if (!ret) {
778 ret = sysfs_create_file(cpufreq_global_kobject, attr);
779 if (ret)
780 cpufreq_put_global_kobject();
781 }
782
783 return ret;
784}
785EXPORT_SYMBOL(cpufreq_sysfs_create_file);
786
787void cpufreq_sysfs_remove_file(const struct attribute *attr)
788{
789 sysfs_remove_file(cpufreq_global_kobject, attr);
790 cpufreq_put_global_kobject();
791}
792EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
793
19d6f7ec 794/* symlink affected CPUs */
cf3289d0
AC
795static int cpufreq_add_dev_symlink(unsigned int cpu,
796 struct cpufreq_policy *policy)
19d6f7ec
DJ
797{
798 unsigned int j;
799 int ret = 0;
800
801 for_each_cpu(j, policy->cpus) {
802 struct cpufreq_policy *managed_policy;
8a25a2fd 803 struct device *cpu_dev;
19d6f7ec
DJ
804
805 if (j == cpu)
806 continue;
19d6f7ec 807
2d06d8c4 808 pr_debug("CPU %u already managed, adding link\n", j);
19d6f7ec 809 managed_policy = cpufreq_cpu_get(cpu);
8a25a2fd
KS
810 cpu_dev = get_cpu_device(j);
811 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
19d6f7ec
DJ
812 "cpufreq");
813 if (ret) {
814 cpufreq_cpu_put(managed_policy);
815 return ret;
816 }
817 }
818 return ret;
819}
820
cf3289d0
AC
821static int cpufreq_add_dev_interface(unsigned int cpu,
822 struct cpufreq_policy *policy,
8a25a2fd 823 struct device *dev)
909a694e 824{
ecf7e461 825 struct cpufreq_policy new_policy;
909a694e
DJ
826 struct freq_attr **drv_attr;
827 unsigned long flags;
828 int ret = 0;
829 unsigned int j;
830
831 /* prepare interface data */
832 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
8a25a2fd 833 &dev->kobj, "cpufreq");
909a694e
DJ
834 if (ret)
835 return ret;
836
837 /* set up files for this cpu device */
1c3d85dd 838 drv_attr = cpufreq_driver->attr;
909a694e
DJ
839 while ((drv_attr) && (*drv_attr)) {
840 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
841 if (ret)
1c3d85dd 842 goto err_out_kobj_put;
909a694e
DJ
843 drv_attr++;
844 }
1c3d85dd 845 if (cpufreq_driver->get) {
909a694e
DJ
846 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
847 if (ret)
1c3d85dd 848 goto err_out_kobj_put;
909a694e 849 }
1c3d85dd 850 if (cpufreq_driver->target) {
909a694e
DJ
851 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
852 if (ret)
1c3d85dd 853 goto err_out_kobj_put;
909a694e 854 }
1c3d85dd 855 if (cpufreq_driver->bios_limit) {
e2f74f35
TR
856 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
857 if (ret)
1c3d85dd 858 goto err_out_kobj_put;
e2f74f35 859 }
909a694e 860
0d1857a1 861 write_lock_irqsave(&cpufreq_driver_lock, flags);
909a694e 862 for_each_cpu(j, policy->cpus) {
909a694e 863 per_cpu(cpufreq_cpu_data, j) = policy;
f1625066 864 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
909a694e 865 }
0d1857a1 866 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
909a694e
DJ
867
868 ret = cpufreq_add_dev_symlink(cpu, policy);
ecf7e461
DJ
869 if (ret)
870 goto err_out_kobj_put;
871
872 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
873 /* assure that the starting sequence is run in __cpufreq_set_policy */
874 policy->governor = NULL;
875
876 /* set default policy */
877 ret = __cpufreq_set_policy(policy, &new_policy);
878 policy->user_policy.policy = policy->policy;
879 policy->user_policy.governor = policy->governor;
880
881 if (ret) {
2d06d8c4 882 pr_debug("setting policy failed\n");
1c3d85dd
RW
883 if (cpufreq_driver->exit)
884 cpufreq_driver->exit(policy);
ecf7e461 885 }
909a694e
DJ
886 return ret;
887
888err_out_kobj_put:
889 kobject_put(&policy->kobj);
890 wait_for_completion(&policy->kobj_unregister);
891 return ret;
892}
893
#ifdef CONFIG_HOTPLUG_CPU
/* Attach a hotplugged @cpu to the existing policy of @sibling: stop the
 * governor, publish the per-cpu pointers under the locks, restart the
 * governor, and create the sysfs link for the new cpu device. */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	if (has_target)
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	if (has_target) {
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif
1da177e4
LT
933
934/**
935 * cpufreq_add_dev - add a CPU device
936 *
32ee8c3e 937 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
938 *
939 * The Oracle says: try running cpufreq registration/unregistration concurrently
940 * with with cpu hotplugging and all hell will break loose. Tried to clean this
941 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 942 */
8a25a2fd 943static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 944{
fcf80582 945 unsigned int j, cpu = dev->id;
65922465 946 int ret = -ENOMEM;
1da177e4 947 struct cpufreq_policy *policy;
1da177e4 948 unsigned long flags;
90e41bac 949#ifdef CONFIG_HOTPLUG_CPU
fcf80582 950 struct cpufreq_governor *gov;
90e41bac
PB
951 int sibling;
952#endif
1da177e4 953
c32b6b8e
AR
954 if (cpu_is_offline(cpu))
955 return 0;
956
2d06d8c4 957 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
958
959#ifdef CONFIG_SMP
960 /* check whether a different CPU already registered this
961 * CPU because it is in the same boat. */
962 policy = cpufreq_cpu_get(cpu);
963 if (unlikely(policy)) {
8ff69732 964 cpufreq_cpu_put(policy);
1da177e4
LT
965 return 0;
966 }
fcf80582
VK
967
968#ifdef CONFIG_HOTPLUG_CPU
969 /* Check if this cpu was hot-unplugged earlier and has siblings */
0d1857a1 970 read_lock_irqsave(&cpufreq_driver_lock, flags);
fcf80582
VK
971 for_each_online_cpu(sibling) {
972 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
2eaa3e2d 973 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
0d1857a1 974 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 975 return cpufreq_add_policy_cpu(cpu, sibling, dev);
2eaa3e2d 976 }
fcf80582 977 }
0d1857a1 978 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 979#endif
1da177e4
LT
980#endif
981
1c3d85dd 982 if (!try_module_get(cpufreq_driver->owner)) {
1da177e4
LT
983 ret = -EINVAL;
984 goto module_out;
985 }
986
e98df50c 987 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
059019a3 988 if (!policy)
1da177e4 989 goto nomem_out;
059019a3
DJ
990
991 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
3f4a782b 992 goto err_free_policy;
059019a3
DJ
993
994 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
3f4a782b 995 goto err_free_cpumask;
1da177e4
LT
996
997 policy->cpu = cpu;
65922465 998 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
835481d9 999 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 1000
5a01f2e8 1001 /* Initially set CPU itself as the policy_cpu */
f1625066 1002 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
5a01f2e8 1003
1da177e4 1004 init_completion(&policy->kobj_unregister);
65f27f38 1005 INIT_WORK(&policy->update, handle_update);
1da177e4
LT
1006
1007 /* call driver. From then on the cpufreq must be able
1008 * to accept all calls to ->verify and ->setpolicy for this CPU
1009 */
1c3d85dd 1010 ret = cpufreq_driver->init(policy);
1da177e4 1011 if (ret) {
2d06d8c4 1012 pr_debug("initialization failed\n");
2eaa3e2d 1013 goto err_set_policy_cpu;
1da177e4 1014 }
643ae6e8 1015
fcf80582
VK
1016 /* related cpus should atleast have policy->cpus */
1017 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1018
643ae6e8
VK
1019 /*
1020 * affected cpus must always be the one, which are online. We aren't
1021 * managing offline cpus here.
1022 */
1023 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1024
187d9f4e
MC
1025 policy->user_policy.min = policy->min;
1026 policy->user_policy.max = policy->max;
1da177e4 1027
a1531acd
TR
1028 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1029 CPUFREQ_START, policy);
1030
fcf80582
VK
1031#ifdef CONFIG_HOTPLUG_CPU
1032 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1033 if (gov) {
1034 policy->governor = gov;
1035 pr_debug("Restoring governor %s for cpu %d\n",
1036 policy->governor->name, cpu);
4bfa042c 1037 }
fcf80582 1038#endif
1da177e4 1039
8a25a2fd 1040 ret = cpufreq_add_dev_interface(cpu, policy, dev);
19d6f7ec
DJ
1041 if (ret)
1042 goto err_out_unregister;
8ff69732 1043
038c5b3e 1044 kobject_uevent(&policy->kobj, KOBJ_ADD);
1c3d85dd 1045 module_put(cpufreq_driver->owner);
2d06d8c4 1046 pr_debug("initialization complete\n");
87c32271 1047
1da177e4
LT
1048 return 0;
1049
1da177e4 1050err_out_unregister:
0d1857a1 1051 write_lock_irqsave(&cpufreq_driver_lock, flags);
835481d9 1052 for_each_cpu(j, policy->cpus)
7a6aedfa 1053 per_cpu(cpufreq_cpu_data, j) = NULL;
0d1857a1 1054 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1055
c10997f6 1056 kobject_put(&policy->kobj);
1da177e4
LT
1057 wait_for_completion(&policy->kobj_unregister);
1058
2eaa3e2d
VK
1059err_set_policy_cpu:
1060 per_cpu(cpufreq_policy_cpu, cpu) = -1;
cad70a6a 1061 free_cpumask_var(policy->related_cpus);
3f4a782b
MD
1062err_free_cpumask:
1063 free_cpumask_var(policy->cpus);
1064err_free_policy:
1da177e4 1065 kfree(policy);
1da177e4 1066nomem_out:
1c3d85dd 1067 module_put(cpufreq_driver->owner);
c32b6b8e 1068module_out:
1da177e4
LT
1069 return ret;
1070}
1071
b8eed8af
VK
/* Make @cpu the owning CPU of @policy: remember the previous owner in
 * last_cpu, repoint every sibling's policy_cpu entry at the new owner,
 * and notify the freq table helpers and policy notifier chain. */
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int j;

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	/* every CPU sharing this policy must resolve to the new owner */
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
1da177e4
LT
1088
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * NOTE(review): the old comment claimed the caller must already hold
 * policy_rwsem in write mode, but this routine now takes and releases
 * the rwsem itself; callers must NOT hold it.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, ret, cpus;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
	struct device *cpu_dev;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	/* Atomically unpublish this CPU's policy pointer. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	data = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!data) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
	/* Remember the governor so it can be restored if the CPU
	 * comes back online later. */
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			data->governor->name, CPUFREQ_NAME_LEN);
#endif

	WARN_ON(lock_policy_rwsem_write(cpu));
	cpus = cpumask_weight(data->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, data->cpus);
	unlock_policy_rwsem_write(cpu);

	if (cpu != data->cpu) {
		/* Not the policy owner: only the symlink points here. */
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* first sibling now owns the new sysfs dir */
		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
		if (ret) {
			pr_err("%s: Failed to move kobj: %d", __func__, ret);

			/* Roll back: re-add the CPU to the policy and
			 * republish it so the system stays consistent. */
			WARN_ON(lock_policy_rwsem_write(cpu));
			cpumask_set_cpu(cpu, data->cpus);

			write_lock_irqsave(&cpufreq_driver_lock, flags);
			per_cpu(cpufreq_cpu_data, cpu) = data;
			write_unlock_irqrestore(&cpufreq_driver_lock, flags);

			unlock_policy_rwsem_write(cpu);

			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
					"cpufreq");
			return -EINVAL;
		}

		WARN_ON(lock_policy_rwsem_write(cpu));
		update_policy_cpu(data, cpu_dev->id);
		unlock_policy_rwsem_write(cpu);
		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				__func__, cpu_dev->id, cpu);
	}

	if ((cpus == 1) && (cpufreq_driver->target))
		__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);

	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
	cpufreq_cpu_put(data);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		lock_policy_rwsem_read(cpu);
		kobj = &data->kobj;
		cmp = &data->kobj_unregister;
		unlock_policy_rwsem_read(cpu);
		kobject_put(kobj);

		/* we need to make sure that the underlying kobj is actually
		 * not referenced anymore by anybody before we proceed with
		 * unloading.
		 */
		pr_debug("waiting for dropping of refcount\n");
		wait_for_completion(cmp);
		pr_debug("wait complete\n");

		if (cpufreq_driver->exit)
			cpufreq_driver->exit(data);

		free_cpumask_var(data->related_cpus);
		free_cpumask_var(data->cpus);
		kfree(data);
	} else if (cpufreq_driver->target) {
		/* Policy survives with the remaining CPUs: restart the
		 * governor that was stopped above. */
		__cpufreq_governor(data, CPUFREQ_GOV_START);
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	return 0;
}
1202
1203
8a25a2fd 1204static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1205{
8a25a2fd 1206 unsigned int cpu = dev->id;
5a01f2e8 1207 int retval;
ec28297a
VP
1208
1209 if (cpu_is_offline(cpu))
1210 return 0;
1211
8a25a2fd 1212 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1213 return retval;
1214}
1215
1216
65f27f38 1217static void handle_update(struct work_struct *work)
1da177e4 1218{
65f27f38
DH
1219 struct cpufreq_policy *policy =
1220 container_of(work, struct cpufreq_policy, update);
1221 unsigned int cpu = policy->cpu;
2d06d8c4 1222 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1223 cpufreq_update_policy(cpu);
1224}
1225
1226/**
1227 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1228 * @cpu: cpu number
1229 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1230 * @new_freq: CPU frequency the CPU actually runs at
1231 *
29464f28
DJ
1232 * We adjust to current frequency first, and need to clean up later.
1233 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1234 */
e08f5f5b
GS
1235static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1236 unsigned int new_freq)
1da177e4 1237{
b43a7ffb 1238 struct cpufreq_policy *policy;
1da177e4 1239 struct cpufreq_freqs freqs;
b43a7ffb
VK
1240 unsigned long flags;
1241
1da177e4 1242
2d06d8c4 1243 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1244 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1245
1da177e4
LT
1246 freqs.old = old_freq;
1247 freqs.new = new_freq;
b43a7ffb
VK
1248
1249 read_lock_irqsave(&cpufreq_driver_lock, flags);
1250 policy = per_cpu(cpufreq_cpu_data, cpu);
1251 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1252
1253 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1254 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1da177e4
LT
1255}
1256
1257
32ee8c3e 1258/**
4ab70df4 1259 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1260 * @cpu: CPU number
1261 *
1262 * This is the last known freq, without actually getting it from the driver.
1263 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1264 */
1265unsigned int cpufreq_quick_get(unsigned int cpu)
1266{
9e21ba8b 1267 struct cpufreq_policy *policy;
e08f5f5b 1268 unsigned int ret_freq = 0;
95235ca2 1269
1c3d85dd
RW
1270 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1271 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1272
1273 policy = cpufreq_cpu_get(cpu);
95235ca2 1274 if (policy) {
e08f5f5b 1275 ret_freq = policy->cur;
95235ca2
VP
1276 cpufreq_cpu_put(policy);
1277 }
1278
4d34a67d 1279 return ret_freq;
95235ca2
VP
1280}
1281EXPORT_SYMBOL(cpufreq_quick_get);
1282
3d737108
JB
1283/**
1284 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1285 * @cpu: CPU number
1286 *
1287 * Just return the max possible frequency for a given CPU.
1288 */
1289unsigned int cpufreq_quick_get_max(unsigned int cpu)
1290{
1291 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1292 unsigned int ret_freq = 0;
1293
1294 if (policy) {
1295 ret_freq = policy->max;
1296 cpufreq_cpu_put(policy);
1297 }
1298
1299 return ret_freq;
1300}
1301EXPORT_SYMBOL(cpufreq_quick_get_max);
1302
95235ca2 1303
5a01f2e8 1304static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1305{
7a6aedfa 1306 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1307 unsigned int ret_freq = 0;
5800043b 1308
1c3d85dd 1309 if (!cpufreq_driver->get)
4d34a67d 1310 return ret_freq;
1da177e4 1311
1c3d85dd 1312 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1313
e08f5f5b 1314 if (ret_freq && policy->cur &&
1c3d85dd 1315 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e08f5f5b
GS
1316 /* verify no discrepancy between actual and
1317 saved value exists */
1318 if (unlikely(ret_freq != policy->cur)) {
1319 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1320 schedule_work(&policy->update);
1321 }
1322 }
1323
4d34a67d 1324 return ret_freq;
5a01f2e8 1325}
1da177e4 1326
5a01f2e8
VP
1327/**
1328 * cpufreq_get - get the current CPU frequency (in kHz)
1329 * @cpu: CPU number
1330 *
1331 * Get the CPU current (static) CPU frequency
1332 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq = 0;

	if (!policy)
		return 0;

	/* lock_policy_rwsem_read() returns non-zero on failure, in
	 * which case we report 0 just like the no-policy case. */
	if (!lock_policy_rwsem_read(cpu)) {
		freq = __cpufreq_get(cpu);
		unlock_policy_rwsem_read(cpu);
	}

	cpufreq_cpu_put(policy);
	return freq;
}
EXPORT_SYMBOL(cpufreq_get);
1354
8a25a2fd
KS
/* Glue between the cpufreq core and the CPU subsystem: add_dev/remove_dev
 * are invoked for every CPU device that is registered or removed. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1361
1da177e4 1362
42d4dc3f 1363/**
e00e56df
RW
1364 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1365 *
1366 * This function is only executed for the boot processor. The other CPUs
1367 * have been put offline by means of CPU hotplug.
42d4dc3f 1368 */
e00e56df 1369static int cpufreq_bp_suspend(void)
42d4dc3f 1370{
e08f5f5b 1371 int ret = 0;
4bc5d341 1372
e00e56df 1373 int cpu = smp_processor_id();
42d4dc3f
BH
1374 struct cpufreq_policy *cpu_policy;
1375
2d06d8c4 1376 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1377
e00e56df 1378 /* If there's no policy for the boot CPU, we have nothing to do. */
42d4dc3f
BH
1379 cpu_policy = cpufreq_cpu_get(cpu);
1380 if (!cpu_policy)
e00e56df 1381 return 0;
42d4dc3f 1382
1c3d85dd
RW
1383 if (cpufreq_driver->suspend) {
1384 ret = cpufreq_driver->suspend(cpu_policy);
ce6c3997 1385 if (ret)
42d4dc3f
BH
1386 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1387 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1388 }
1389
42d4dc3f 1390 cpufreq_cpu_put(cpu_policy);
c9060494 1391 return ret;
42d4dc3f
BH
1392}
1393
1da177e4 1394/**
e00e56df 1395 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1396 *
1397 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1398 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1399 * restored. It will verify that the current freq is in sync with
1400 * what we believe it to be. This is a bit later than when it
1401 * should be, but nonethteless it's better than calling
1402 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1403 *
1404 * This function is only executed for the boot CPU. The other CPUs have not
1405 * been turned on yet.
1da177e4 1406 */
e00e56df 1407static void cpufreq_bp_resume(void)
1da177e4 1408{
e08f5f5b 1409 int ret = 0;
4bc5d341 1410
e00e56df 1411 int cpu = smp_processor_id();
1da177e4
LT
1412 struct cpufreq_policy *cpu_policy;
1413
2d06d8c4 1414 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1415
e00e56df 1416 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1417 cpu_policy = cpufreq_cpu_get(cpu);
1418 if (!cpu_policy)
e00e56df 1419 return;
1da177e4 1420
1c3d85dd
RW
1421 if (cpufreq_driver->resume) {
1422 ret = cpufreq_driver->resume(cpu_policy);
1da177e4
LT
1423 if (ret) {
1424 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1425 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1426 goto fail;
1da177e4
LT
1427 }
1428 }
1429
1da177e4 1430 schedule_work(&cpu_policy->update);
ce6c3997 1431
c9060494 1432fail:
1da177e4 1433 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1434}
1435
e00e56df
RW
/* Syscore ops run on the boot CPU only, late in suspend and early in
 * resume, after the other CPUs have been taken offline. */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1440
9d95046e
BP
1441/**
1442 * cpufreq_get_current_driver - return current driver's name
1443 *
1444 * Return the name string of the currently loaded cpufreq driver
1445 * or NULL, if none.
1446 */
1447const char *cpufreq_get_current_driver(void)
1448{
1c3d85dd
RW
1449 if (cpufreq_driver)
1450 return cpufreq_driver->name;
1451
1452 return NULL;
9d95046e
BP
1453}
1454EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1455
1456/*********************************************************************
1457 * NOTIFIER LISTS INTERFACE *
1458 *********************************************************************/
1459
1460/**
1461 * cpufreq_register_notifier - register a driver with cpufreq
1462 * @nb: notifier function to register
1463 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1464 *
32ee8c3e 1465 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1466 * are notified about clock rate changes (once before and once after
1467 * the transition), or a list of drivers that are notified about
1468 * changes in cpufreq policy.
1469 *
1470 * This function may sleep, and has the same return conditions as
e041c683 1471 * blocking_notifier_chain_register.
1da177e4
LT
1472 */
1473int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1474{
1475 int ret;
1476
d5aaffa9
DB
1477 if (cpufreq_disabled())
1478 return -EINVAL;
1479
74212ca4
CEB
1480 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1481
1da177e4
LT
1482 switch (list) {
1483 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1484 ret = srcu_notifier_chain_register(
e041c683 1485 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1486 break;
1487 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1488 ret = blocking_notifier_chain_register(
1489 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1490 break;
1491 default:
1492 ret = -EINVAL;
1493 }
1da177e4
LT
1494
1495 return ret;
1496}
1497EXPORT_SYMBOL(cpufreq_register_notifier);
1498
1499
1500/**
1501 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1502 * @nb: notifier block to be unregistered
1503 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1504 *
1505 * Remove a driver from the CPU frequency notifier list.
1506 *
1507 * This function may sleep, and has the same return conditions as
e041c683 1508 * blocking_notifier_chain_unregister.
1da177e4
LT
1509 */
1510int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1511{
1512 int ret;
1513
d5aaffa9
DB
1514 if (cpufreq_disabled())
1515 return -EINVAL;
1516
1da177e4
LT
1517 switch (list) {
1518 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1519 ret = srcu_notifier_chain_unregister(
e041c683 1520 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1521 break;
1522 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1523 ret = blocking_notifier_chain_unregister(
1524 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1525 break;
1526 default:
1527 ret = -EINVAL;
1528 }
1da177e4
LT
1529
1530 return ret;
1531}
1532EXPORT_SYMBOL(cpufreq_unregister_notifier);
1533
1534
1535/*********************************************************************
1536 * GOVERNORS *
1537 *********************************************************************/
1538
1539
1540int __cpufreq_driver_target(struct cpufreq_policy *policy,
1541 unsigned int target_freq,
1542 unsigned int relation)
1543{
1544 int retval = -EINVAL;
7249924e 1545 unsigned int old_target_freq = target_freq;
c32b6b8e 1546
a7b422cd
KRW
1547 if (cpufreq_disabled())
1548 return -ENODEV;
1549
7249924e
VK
1550 /* Make sure that target_freq is within supported range */
1551 if (target_freq > policy->max)
1552 target_freq = policy->max;
1553 if (target_freq < policy->min)
1554 target_freq = policy->min;
1555
1556 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1557 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1558
1559 if (target_freq == policy->cur)
1560 return 0;
1561
1c3d85dd
RW
1562 if (cpufreq_driver->target)
1563 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1564
1da177e4
LT
1565 return retval;
1566}
1567EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1568
1da177e4
LT
1569int cpufreq_driver_target(struct cpufreq_policy *policy,
1570 unsigned int target_freq,
1571 unsigned int relation)
1572{
f1829e4a 1573 int ret = -EINVAL;
1da177e4 1574
5a01f2e8 1575 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1576 goto fail;
1da177e4
LT
1577
1578 ret = __cpufreq_driver_target(policy, target_freq, relation);
1579
5a01f2e8 1580 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1581
f1829e4a 1582fail:
1da177e4
LT
1583 return ret;
1584}
1585EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1586
bf0b90e3 1587int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62 1588{
d5aaffa9 1589 if (cpufreq_disabled())
a262e94c 1590 return 0;
d5aaffa9 1591
1c3d85dd 1592 if (!cpufreq_driver->getavg)
0676f7f2
VK
1593 return 0;
1594
a262e94c 1595 return cpufreq_driver->getavg(policy, cpu);
dfde5d62 1596}
5a01f2e8 1597EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1598
153d7f3f 1599/*
153d7f3f
AV
1600 * when "event" is CPUFREQ_GOV_LIMITS
1601 */
1da177e4 1602
e08f5f5b
GS
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* Fall back to the latency-insensitive default governor when the
	 * hardware's transition latency exceeds what the requested
	 * governor can tolerate. */
	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* Track how many policies have this governor initialized. */
	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	}

	/* Module refcounting: a successful GOV_START keeps the reference
	 * taken above (one per governed policy); it is dropped again on a
	 * successful GOV_STOP. Every other event is reference-neutral. */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
1656
1657
1da177e4
LT
1658int cpufreq_register_governor(struct cpufreq_governor *governor)
1659{
3bcb09a3 1660 int err;
1da177e4
LT
1661
1662 if (!governor)
1663 return -EINVAL;
1664
a7b422cd
KRW
1665 if (cpufreq_disabled())
1666 return -ENODEV;
1667
3fc54d37 1668 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1669
b394058f 1670 governor->initialized = 0;
3bcb09a3
JF
1671 err = -EBUSY;
1672 if (__find_governor(governor->name) == NULL) {
1673 err = 0;
1674 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1675 }
1da177e4 1676
32ee8c3e 1677 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1678 return err;
1da177e4
LT
1679}
1680EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1681
1682
1683void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1684{
90e41bac
PB
1685#ifdef CONFIG_HOTPLUG_CPU
1686 int cpu;
1687#endif
1688
1da177e4
LT
1689 if (!governor)
1690 return;
1691
a7b422cd
KRW
1692 if (cpufreq_disabled())
1693 return;
1694
90e41bac
PB
1695#ifdef CONFIG_HOTPLUG_CPU
1696 for_each_present_cpu(cpu) {
1697 if (cpu_online(cpu))
1698 continue;
1699 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1700 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1701 }
1702#endif
1703
3fc54d37 1704 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1705 list_del(&governor->governor_list);
3fc54d37 1706 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1707 return;
1708}
1709EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1710
1711
1712
1713/*********************************************************************
1714 * POLICY INTERFACE *
1715 *********************************************************************/
1716
1717/**
1718 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1719 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1720 * is written
1da177e4
LT
1721 *
1722 * Reads the current cpufreq policy.
1723 */
1724int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1725{
1726 struct cpufreq_policy *cpu_policy;
1727 if (!policy)
1728 return -EINVAL;
1729
1730 cpu_policy = cpufreq_cpu_get(cpu);
1731 if (!cpu_policy)
1732 return -EINVAL;
1733
1da177e4 1734 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1735
1736 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1737 return 0;
1738}
1739EXPORT_SYMBOL(cpufreq_get_policy);
1740
1741
153d7f3f 1742/*
e08f5f5b
GS
1743 * data : current policy.
1744 * policy : policy to be set.
153d7f3f 1745 */
e08f5f5b
GS
/* Apply @policy on top of the live policy @data. Caller holds the
 * policy rwsem in write mode; it is dropped temporarily around
 * GOV_POLICY_EXIT calls (see inline comments). */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	/* Reject ranges that do not overlap the current limits. */
	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		/* setpolicy-style drivers manage the frequency themselves. */
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor) {
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
				/* Drop the rwsem around POLICY_EXIT: the
				 * governor tears down sysfs entries whose
				 * removal can block on this same lock. */
				unlock_policy_rwsem_write(policy->cpu);
				__cpufreq_governor(data,
						CPUFREQ_GOV_POLICY_EXIT);
				lock_policy_rwsem_write(policy->cpu);
			}

			/* start new governor */
			data->governor = policy->governor;
			if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					/* START failed: undo the INIT,
					 * again with the rwsem dropped. */
					unlock_policy_rwsem_write(policy->cpu);
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_EXIT);
					lock_policy_rwsem_write(policy->cpu);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(data,
							CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
1847
1da177e4
LT
1848/**
1849 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1850 * @cpu: CPU which shall be re-evaluated
1851 *
25985edc 1852 * Useful for policy notifiers which have different necessities
1da177e4
LT
1853 * at different times.
1854 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	/* Build the candidate policy from the user-requested values. */
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur && cpufreq_driver->target)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
1902
dd184a01 1903static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1904 unsigned long action, void *hcpu)
1905{
1906 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1907 struct device *dev;
c32b6b8e 1908
8a25a2fd
KS
1909 dev = get_cpu_device(cpu);
1910 if (dev) {
c32b6b8e
AR
1911 switch (action) {
1912 case CPU_ONLINE:
8a25a2fd 1913 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1914 break;
1915 case CPU_DOWN_PREPARE:
a66b2e50 1916 case CPU_UP_CANCELED_FROZEN:
8a25a2fd 1917 __cpufreq_remove_dev(dev, NULL);
c32b6b8e 1918 break;
5a01f2e8 1919 case CPU_DOWN_FAILED:
8a25a2fd 1920 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1921 break;
1922 }
1923 }
1924 return NOTIFY_OK;
1925}
1926
/* Hotplug notifier block; __refdata because the callback is __cpuinit. */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1929};
1da177e4
LT
1930
1931/*********************************************************************
1932 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1933 *********************************************************************/
1934
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* A driver must supply ->verify and ->init, and at least one of
	 * the two transition mechanisms: ->setpolicy or ->target. */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* setpolicy drivers change frequency behind the core's back, so
	 * loops_per_jiffy must be treated as constant. */
	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* Only one cpufreq driver may be registered at a time. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* This invokes cpufreq_add_dev() -- and hence the driver's
	 * ->init() -- for every present CPU. */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	/* Undo the registration under the same lock so a racing
	 * cpufreq_register_driver() sees a consistent NULL. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2006
2007
2008/**
2009 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2010 *
32ee8c3e 2011 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
2012 * the right to do so, i.e. if you have succeeded in initialising before!
2013 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2014 * currently not initialised.
2015 */
221dee28 2016int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
2017{
2018 unsigned long flags;
2019
1c3d85dd 2020 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 2021 return -EINVAL;
1da177e4 2022
2d06d8c4 2023 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 2024
8a25a2fd 2025 subsys_interface_unregister(&cpufreq_interface);
65edc68c 2026 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 2027
0d1857a1 2028 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2029 cpufreq_driver = NULL;
0d1857a1 2030 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
2031
2032 return 0;
2033}
2034EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2035
2036static int __init cpufreq_core_init(void)
2037{
2038 int cpu;
2039
a7b422cd
KRW
2040 if (cpufreq_disabled())
2041 return -ENODEV;
2042
5a01f2e8 2043 for_each_possible_cpu(cpu) {
f1625066 2044 per_cpu(cpufreq_policy_cpu, cpu) = -1;
5a01f2e8
VP
2045 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2046 }
8aa84ad8 2047
2361be23 2048 cpufreq_global_kobject = kobject_create();
8aa84ad8 2049 BUG_ON(!cpufreq_global_kobject);
e00e56df 2050 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 2051
5a01f2e8
VP
2052 return 0;
2053}
5a01f2e8 2054core_initcall(cpufreq_core_init);
This page took 0.766836 seconds and 5 git commands to generate.