cpufreq / intel_pstate: Optimize intel_pstate_set_policy
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
1da177e4
LT
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/notifier.h>
24#include <linux/cpufreq.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/device.h>
29#include <linux/slab.h>
30#include <linux/cpu.h>
31#include <linux/completion.h>
3fc54d37 32#include <linux/mutex.h>
e00e56df 33#include <linux/syscore_ops.h>
1da177e4 34
6f4f2723
TR
35#include <trace/events/power.h>
36
1da177e4 37/**
cd878479 38 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
39 * level driver of CPUFreq support, and its spinlock. This lock
40 * also protects the cpufreq_cpu_data array.
41 */
5800043b 42static struct cpufreq_driver __rcu *cpufreq_driver;
7a6aedfa 43static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
44#ifdef CONFIG_HOTPLUG_CPU
45/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 47#endif
0d1857a1 48static DEFINE_RWLOCK(cpufreq_driver_lock);
1da177e4 49
5a01f2e8
VP
50/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
5a01f2e8
VP
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 66 */
f1625066 67static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
68static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69
70#define lock_policy_rwsem(mode, cpu) \
fa1d8af4 71static int lock_policy_rwsem_##mode(int cpu) \
5a01f2e8 72{ \
f1625066 73 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
5a01f2e8
VP
74 BUG_ON(policy_cpu == -1); \
75 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8
VP
76 \
77 return 0; \
78}
79
80lock_policy_rwsem(read, cpu);
5a01f2e8 81lock_policy_rwsem(write, cpu);
5a01f2e8 82
fa1d8af4
VK
83#define unlock_policy_rwsem(mode, cpu) \
84static void unlock_policy_rwsem_##mode(int cpu) \
85{ \
86 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
87 BUG_ON(policy_cpu == -1); \
88 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8 89}
5a01f2e8 90
fa1d8af4
VK
91unlock_policy_rwsem(read, cpu);
92unlock_policy_rwsem(write, cpu);
5a01f2e8 93
1da177e4 94/* internal prototypes */
29464f28
DJ
95static int __cpufreq_governor(struct cpufreq_policy *policy,
96 unsigned int event);
5a01f2e8 97static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 98static void handle_update(struct work_struct *work);
1da177e4
LT
99
100/**
32ee8c3e
DJ
101 * Two notifier lists: the "policy" list is involved in the
102 * validation process for a new CPU frequency policy; the
1da177e4
LT
103 * "transition" list for kernel code that needs to handle
104 * changes to devices when the CPU clock speed changes.
105 * The mutex locks both lists.
106 */
e041c683 107static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 108static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 109
74212ca4 110static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
111static int __init init_cpufreq_transition_notifier_list(void)
112{
113 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 114 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
115 return 0;
116}
b3438f82 117pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 118
a7b422cd 119static int off __read_mostly;
da584455 120static int cpufreq_disabled(void)
a7b422cd
KRW
121{
122 return off;
123}
124void disable_cpufreq(void)
125{
126 off = 1;
127}
1da177e4 128static LIST_HEAD(cpufreq_governor_list);
29464f28 129static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 130
4d5dcc42
VK
131bool have_governor_per_policy(void)
132{
5800043b
NZ
133 bool have_governor_per_policy;
134 rcu_read_lock();
135 have_governor_per_policy =
136 rcu_dereference(cpufreq_driver)->have_governor_per_policy;
137 rcu_read_unlock();
138 return have_governor_per_policy;
4d5dcc42
VK
139}
140
a9144436 141static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
1da177e4
LT
142{
143 struct cpufreq_policy *data;
5800043b 144 struct cpufreq_driver *driver;
1da177e4
LT
145 unsigned long flags;
146
7a6aedfa 147 if (cpu >= nr_cpu_ids)
1da177e4
LT
148 goto err_out;
149
150 /* get the cpufreq driver */
5800043b
NZ
151 rcu_read_lock();
152 driver = rcu_dereference(cpufreq_driver);
1da177e4 153
5800043b 154 if (!driver)
1da177e4
LT
155 goto err_out_unlock;
156
5800043b 157 if (!try_module_get(driver->owner))
1da177e4
LT
158 goto err_out_unlock;
159
5800043b 160 read_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4
LT
161
162 /* get the CPU */
7a6aedfa 163 data = per_cpu(cpufreq_cpu_data, cpu);
1da177e4
LT
164
165 if (!data)
166 goto err_out_put_module;
167
a9144436 168 if (!sysfs && !kobject_get(&data->kobj))
1da177e4
LT
169 goto err_out_put_module;
170
0d1857a1 171 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
5800043b 172 rcu_read_unlock();
1da177e4
LT
173 return data;
174
7d5e350f 175err_out_put_module:
5800043b 176 module_put(driver->owner);
0d1857a1 177 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
5800043b
NZ
178err_out_unlock:
179 rcu_read_unlock();
7d5e350f 180err_out:
1da177e4
LT
181 return NULL;
182}
a9144436
SB
183
184struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
185{
d5aaffa9
DB
186 if (cpufreq_disabled())
187 return NULL;
188
a9144436
SB
189 return __cpufreq_cpu_get(cpu, false);
190}
1da177e4
LT
191EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
192
a9144436
SB
193static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
194{
195 return __cpufreq_cpu_get(cpu, true);
196}
197
198static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
199{
200 if (!sysfs)
201 kobject_put(&data->kobj);
5800043b
NZ
202 rcu_read_lock();
203 module_put(rcu_dereference(cpufreq_driver)->owner);
204 rcu_read_unlock();
a9144436 205}
7d5e350f 206
1da177e4
LT
207void cpufreq_cpu_put(struct cpufreq_policy *data)
208{
d5aaffa9
DB
209 if (cpufreq_disabled())
210 return;
211
a9144436 212 __cpufreq_cpu_put(data, false);
1da177e4
LT
213}
214EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
215
a9144436
SB
216static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
217{
218 __cpufreq_cpu_put(data, true);
219}
1da177e4 220
1da177e4
LT
221/*********************************************************************
222 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
223 *********************************************************************/
224
225/**
226 * adjust_jiffies - adjust the system "loops_per_jiffy"
227 *
228 * This function alters the system "loops_per_jiffy" for the clock
229 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 230 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
231 * per-CPU loops_per_jiffy value wherever possible.
232 */
233#ifndef CONFIG_SMP
234static unsigned long l_p_j_ref;
235static unsigned int l_p_j_ref_freq;
236
858119e1 237static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4
LT
238{
239 if (ci->flags & CPUFREQ_CONST_LOOPS)
240 return;
241
242 if (!l_p_j_ref_freq) {
243 l_p_j_ref = loops_per_jiffy;
244 l_p_j_ref_freq = ci->old;
2d06d8c4 245 pr_debug("saving %lu as reference value for loops_per_jiffy; "
e08f5f5b 246 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
1da177e4 247 }
d08de0c1 248 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
42d4dc3f 249 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
e08f5f5b
GS
250 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
251 ci->new);
2d06d8c4 252 pr_debug("scaling loops_per_jiffy to %lu "
e08f5f5b 253 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
1da177e4
LT
254 }
255}
256#else
e08f5f5b
GS
257static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
258{
259 return;
260}
1da177e4
LT
261#endif
262
263
b43a7ffb
VK
264void __cpufreq_notify_transition(struct cpufreq_policy *policy,
265 struct cpufreq_freqs *freqs, unsigned int state)
1da177e4
LT
266{
267 BUG_ON(irqs_disabled());
268
d5aaffa9
DB
269 if (cpufreq_disabled())
270 return;
271
5800043b
NZ
272 rcu_read_lock();
273 freqs->flags = rcu_dereference(cpufreq_driver)->flags;
274 rcu_read_unlock();
2d06d8c4 275 pr_debug("notification %u of frequency transition to %u kHz\n",
e4472cb3 276 state, freqs->new);
1da177e4 277
1da177e4 278 switch (state) {
e4472cb3 279
1da177e4 280 case CPUFREQ_PRECHANGE:
32ee8c3e 281 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
282 * which is not equal to what the cpufreq core thinks is
283 * "old frequency".
1da177e4 284 */
5800043b 285 if (!(freqs->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
286 if ((policy) && (policy->cpu == freqs->cpu) &&
287 (policy->cur) && (policy->cur != freqs->old)) {
2d06d8c4 288 pr_debug("Warning: CPU frequency is"
e4472cb3
DJ
289 " %u, cpufreq assumed %u kHz.\n",
290 freqs->old, policy->cur);
291 freqs->old = policy->cur;
1da177e4
LT
292 }
293 }
b4dfdbb3 294 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 295 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
296 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
297 break;
e4472cb3 298
1da177e4
LT
299 case CPUFREQ_POSTCHANGE:
300 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
2d06d8c4 301 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
6f4f2723 302 (unsigned long)freqs->cpu);
25e41933 303 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 304 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 305 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
306 if (likely(policy) && likely(policy->cpu == freqs->cpu))
307 policy->cur = freqs->new;
1da177e4
LT
308 break;
309 }
1da177e4 310}
b43a7ffb
VK
311/**
312 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
313 * on frequency transition.
314 *
315 * This function calls the transition notifiers and the "adjust_jiffies"
316 * function. It is called twice on all CPU frequency changes that have
317 * external effects.
318 */
319void cpufreq_notify_transition(struct cpufreq_policy *policy,
320 struct cpufreq_freqs *freqs, unsigned int state)
321{
322 for_each_cpu(freqs->cpu, policy->cpus)
323 __cpufreq_notify_transition(policy, freqs, state);
324}
1da177e4
LT
325EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
326
327
328
329/*********************************************************************
330 * SYSFS INTERFACE *
331 *********************************************************************/
332
3bcb09a3
JF
333static struct cpufreq_governor *__find_governor(const char *str_governor)
334{
335 struct cpufreq_governor *t;
336
337 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 338 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
339 return t;
340
341 return NULL;
342}
343
1da177e4
LT
344/**
345 * cpufreq_parse_governor - parse a governor string
346 */
905d77cd 347static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
348 struct cpufreq_governor **governor)
349{
3bcb09a3 350 int err = -EINVAL;
5800043b
NZ
351 struct cpufreq_driver *driver;
352 bool has_setpolicy;
353 bool has_target;
354
355 rcu_read_lock();
356 driver = rcu_dereference(cpufreq_driver);
357 if (!driver) {
358 rcu_read_unlock();
3bcb09a3 359 goto out;
5800043b
NZ
360 }
361 has_setpolicy = driver->setpolicy ? true : false;
362 has_target = driver->target ? true : false;
363 rcu_read_unlock();
3bcb09a3 364
5800043b 365 if (has_setpolicy) {
1da177e4
LT
366 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
367 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 368 err = 0;
e08f5f5b
GS
369 } else if (!strnicmp(str_governor, "powersave",
370 CPUFREQ_NAME_LEN)) {
1da177e4 371 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 372 err = 0;
1da177e4 373 }
5800043b 374 } else if (has_target) {
1da177e4 375 struct cpufreq_governor *t;
3bcb09a3 376
3fc54d37 377 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
378
379 t = __find_governor(str_governor);
380
ea714970 381 if (t == NULL) {
1a8e1463 382 int ret;
ea714970 383
1a8e1463
KC
384 mutex_unlock(&cpufreq_governor_mutex);
385 ret = request_module("cpufreq_%s", str_governor);
386 mutex_lock(&cpufreq_governor_mutex);
ea714970 387
1a8e1463
KC
388 if (ret == 0)
389 t = __find_governor(str_governor);
ea714970
JF
390 }
391
3bcb09a3
JF
392 if (t != NULL) {
393 *governor = t;
394 err = 0;
1da177e4 395 }
3bcb09a3 396
3fc54d37 397 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 398 }
29464f28 399out:
3bcb09a3 400 return err;
1da177e4 401}
1da177e4
LT
402
403
1da177e4 404/**
e08f5f5b
GS
405 * cpufreq_per_cpu_attr_read() / show_##file_name() -
406 * print out cpufreq information
1da177e4
LT
407 *
408 * Write out information from cpufreq_driver->policy[cpu]; object must be
409 * "unsigned int".
410 */
411
32ee8c3e
DJ
412#define show_one(file_name, object) \
413static ssize_t show_##file_name \
905d77cd 414(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 415{ \
29464f28 416 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
417}
418
419show_one(cpuinfo_min_freq, cpuinfo.min_freq);
420show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 421show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
422show_one(scaling_min_freq, min);
423show_one(scaling_max_freq, max);
424show_one(scaling_cur_freq, cur);
425
e08f5f5b
GS
426static int __cpufreq_set_policy(struct cpufreq_policy *data,
427 struct cpufreq_policy *policy);
7970e08b 428
1da177e4
LT
429/**
430 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
431 */
432#define store_one(file_name, object) \
433static ssize_t store_##file_name \
905d77cd 434(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 435{ \
f55c9c26 436 unsigned int ret; \
1da177e4
LT
437 struct cpufreq_policy new_policy; \
438 \
439 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
440 if (ret) \
441 return -EINVAL; \
442 \
29464f28 443 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
444 if (ret != 1) \
445 return -EINVAL; \
446 \
7970e08b
TR
447 ret = __cpufreq_set_policy(policy, &new_policy); \
448 policy->user_policy.object = policy->object; \
1da177e4
LT
449 \
450 return ret ? ret : count; \
451}
452
29464f28
DJ
453store_one(scaling_min_freq, min);
454store_one(scaling_max_freq, max);
1da177e4
LT
455
456/**
457 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
458 */
905d77cd
DJ
459static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
460 char *buf)
1da177e4 461{
5a01f2e8 462 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
463 if (!cur_freq)
464 return sprintf(buf, "<unknown>");
465 return sprintf(buf, "%u\n", cur_freq);
466}
467
468
469/**
470 * show_scaling_governor - show the current policy for the specified CPU
471 */
905d77cd 472static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 473{
29464f28 474 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
475 return sprintf(buf, "powersave\n");
476 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
477 return sprintf(buf, "performance\n");
478 else if (policy->governor)
4b972f0b 479 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 480 policy->governor->name);
1da177e4
LT
481 return -EINVAL;
482}
483
484
485/**
486 * store_scaling_governor - store policy for the specified CPU
487 */
905d77cd
DJ
488static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
489 const char *buf, size_t count)
1da177e4 490{
f55c9c26 491 unsigned int ret;
1da177e4
LT
492 char str_governor[16];
493 struct cpufreq_policy new_policy;
494
495 ret = cpufreq_get_policy(&new_policy, policy->cpu);
496 if (ret)
497 return ret;
498
29464f28 499 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
500 if (ret != 1)
501 return -EINVAL;
502
e08f5f5b
GS
503 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
504 &new_policy.governor))
1da177e4
LT
505 return -EINVAL;
506
7970e08b
TR
507 /* Do not use cpufreq_set_policy here or the user_policy.max
508 will be wrongly overridden */
7970e08b
TR
509 ret = __cpufreq_set_policy(policy, &new_policy);
510
511 policy->user_policy.policy = policy->policy;
512 policy->user_policy.governor = policy->governor;
7970e08b 513
e08f5f5b
GS
514 if (ret)
515 return ret;
516 else
517 return count;
1da177e4
LT
518}
519
520/**
521 * show_scaling_driver - show the cpufreq driver currently loaded
522 */
905d77cd 523static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 524{
5800043b
NZ
525 ssize_t size;
526 rcu_read_lock();
527 size = scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
528 rcu_dereference(cpufreq_driver)->name);
529 rcu_read_unlock();
530 return size;
1da177e4
LT
531}
532
533/**
534 * show_scaling_available_governors - show the available CPUfreq governors
535 */
905d77cd
DJ
536static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
537 char *buf)
1da177e4
LT
538{
539 ssize_t i = 0;
540 struct cpufreq_governor *t;
541
5800043b
NZ
542 rcu_read_lock();
543 if (!rcu_dereference(cpufreq_driver)->target) {
544 rcu_read_unlock();
1da177e4
LT
545 i += sprintf(buf, "performance powersave");
546 goto out;
547 }
5800043b 548 rcu_read_unlock();
1da177e4
LT
549
550 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
551 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
552 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 553 goto out;
4b972f0b 554 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 555 }
7d5e350f 556out:
1da177e4
LT
557 i += sprintf(&buf[i], "\n");
558 return i;
559}
e8628dd0 560
835481d9 561static ssize_t show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
562{
563 ssize_t i = 0;
564 unsigned int cpu;
565
835481d9 566 for_each_cpu(cpu, mask) {
1da177e4
LT
567 if (i)
568 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
569 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
570 if (i >= (PAGE_SIZE - 5))
29464f28 571 break;
1da177e4
LT
572 }
573 i += sprintf(&buf[i], "\n");
574 return i;
575}
576
e8628dd0
DW
577/**
578 * show_related_cpus - show the CPUs affected by each transition even if
579 * hw coordination is in use
580 */
581static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
582{
e8628dd0
DW
583 return show_cpus(policy->related_cpus, buf);
584}
585
586/**
587 * show_affected_cpus - show the CPUs affected by each transition
588 */
589static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
590{
591 return show_cpus(policy->cpus, buf);
592}
593
9e76988e 594static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 595 const char *buf, size_t count)
9e76988e
VP
596{
597 unsigned int freq = 0;
598 unsigned int ret;
599
879000f9 600 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
601 return -EINVAL;
602
603 ret = sscanf(buf, "%u", &freq);
604 if (ret != 1)
605 return -EINVAL;
606
607 policy->governor->store_setspeed(policy, freq);
608
609 return count;
610}
611
612static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
613{
879000f9 614 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
615 return sprintf(buf, "<unsupported>\n");
616
617 return policy->governor->show_setspeed(policy, buf);
618}
1da177e4 619
e2f74f35 620/**
8bf1ac72 621 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
622 */
623static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
624{
625 unsigned int limit;
5800043b 626 int (*bios_limit)(int cpu, unsigned int *limit);
e2f74f35 627 int ret;
5800043b
NZ
628
629 rcu_read_lock();
630 bios_limit = rcu_dereference(cpufreq_driver)->bios_limit;
631 rcu_read_unlock();
632
633 if (bios_limit) {
634 ret = bios_limit(policy->cpu, &limit);
e2f74f35
TR
635 if (!ret)
636 return sprintf(buf, "%u\n", limit);
637 }
638 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
639}
640
6dad2a29
BP
641cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
642cpufreq_freq_attr_ro(cpuinfo_min_freq);
643cpufreq_freq_attr_ro(cpuinfo_max_freq);
644cpufreq_freq_attr_ro(cpuinfo_transition_latency);
645cpufreq_freq_attr_ro(scaling_available_governors);
646cpufreq_freq_attr_ro(scaling_driver);
647cpufreq_freq_attr_ro(scaling_cur_freq);
648cpufreq_freq_attr_ro(bios_limit);
649cpufreq_freq_attr_ro(related_cpus);
650cpufreq_freq_attr_ro(affected_cpus);
651cpufreq_freq_attr_rw(scaling_min_freq);
652cpufreq_freq_attr_rw(scaling_max_freq);
653cpufreq_freq_attr_rw(scaling_governor);
654cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 655
905d77cd 656static struct attribute *default_attrs[] = {
1da177e4
LT
657 &cpuinfo_min_freq.attr,
658 &cpuinfo_max_freq.attr,
ed129784 659 &cpuinfo_transition_latency.attr,
1da177e4
LT
660 &scaling_min_freq.attr,
661 &scaling_max_freq.attr,
662 &affected_cpus.attr,
e8628dd0 663 &related_cpus.attr,
1da177e4
LT
664 &scaling_governor.attr,
665 &scaling_driver.attr,
666 &scaling_available_governors.attr,
9e76988e 667 &scaling_setspeed.attr,
1da177e4
LT
668 NULL
669};
670
8aa84ad8
TR
671struct kobject *cpufreq_global_kobject;
672EXPORT_SYMBOL(cpufreq_global_kobject);
673
29464f28
DJ
674#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
675#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 676
29464f28 677static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 678{
905d77cd
DJ
679 struct cpufreq_policy *policy = to_policy(kobj);
680 struct freq_attr *fattr = to_attr(attr);
0db4a8a9 681 ssize_t ret = -EINVAL;
a9144436 682 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 683 if (!policy)
0db4a8a9 684 goto no_policy;
5a01f2e8
VP
685
686 if (lock_policy_rwsem_read(policy->cpu) < 0)
0db4a8a9 687 goto fail;
5a01f2e8 688
e08f5f5b
GS
689 if (fattr->show)
690 ret = fattr->show(policy, buf);
691 else
692 ret = -EIO;
693
5a01f2e8 694 unlock_policy_rwsem_read(policy->cpu);
0db4a8a9 695fail:
a9144436 696 cpufreq_cpu_put_sysfs(policy);
0db4a8a9 697no_policy:
1da177e4
LT
698 return ret;
699}
700
905d77cd
DJ
701static ssize_t store(struct kobject *kobj, struct attribute *attr,
702 const char *buf, size_t count)
1da177e4 703{
905d77cd
DJ
704 struct cpufreq_policy *policy = to_policy(kobj);
705 struct freq_attr *fattr = to_attr(attr);
a07530b4 706 ssize_t ret = -EINVAL;
a9144436 707 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 708 if (!policy)
a07530b4 709 goto no_policy;
5a01f2e8
VP
710
711 if (lock_policy_rwsem_write(policy->cpu) < 0)
a07530b4 712 goto fail;
5a01f2e8 713
e08f5f5b
GS
714 if (fattr->store)
715 ret = fattr->store(policy, buf, count);
716 else
717 ret = -EIO;
718
5a01f2e8 719 unlock_policy_rwsem_write(policy->cpu);
a07530b4 720fail:
a9144436 721 cpufreq_cpu_put_sysfs(policy);
a07530b4 722no_policy:
1da177e4
LT
723 return ret;
724}
725
905d77cd 726static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 727{
905d77cd 728 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 729 pr_debug("last reference is dropped\n");
1da177e4
LT
730 complete(&policy->kobj_unregister);
731}
732
52cf25d0 733static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
734 .show = show,
735 .store = store,
736};
737
738static struct kobj_type ktype_cpufreq = {
739 .sysfs_ops = &sysfs_ops,
740 .default_attrs = default_attrs,
741 .release = cpufreq_sysfs_release,
742};
743
19d6f7ec 744/* symlink affected CPUs */
cf3289d0
AC
745static int cpufreq_add_dev_symlink(unsigned int cpu,
746 struct cpufreq_policy *policy)
19d6f7ec
DJ
747{
748 unsigned int j;
749 int ret = 0;
750
751 for_each_cpu(j, policy->cpus) {
752 struct cpufreq_policy *managed_policy;
8a25a2fd 753 struct device *cpu_dev;
19d6f7ec
DJ
754
755 if (j == cpu)
756 continue;
19d6f7ec 757
2d06d8c4 758 pr_debug("CPU %u already managed, adding link\n", j);
19d6f7ec 759 managed_policy = cpufreq_cpu_get(cpu);
8a25a2fd
KS
760 cpu_dev = get_cpu_device(j);
761 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
19d6f7ec
DJ
762 "cpufreq");
763 if (ret) {
764 cpufreq_cpu_put(managed_policy);
765 return ret;
766 }
767 }
768 return ret;
769}
770
cf3289d0
AC
771static int cpufreq_add_dev_interface(unsigned int cpu,
772 struct cpufreq_policy *policy,
8a25a2fd 773 struct device *dev)
909a694e 774{
ecf7e461 775 struct cpufreq_policy new_policy;
909a694e 776 struct freq_attr **drv_attr;
5800043b 777 struct cpufreq_driver *driver;
909a694e
DJ
778 unsigned long flags;
779 int ret = 0;
780 unsigned int j;
781
782 /* prepare interface data */
783 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
8a25a2fd 784 &dev->kobj, "cpufreq");
909a694e
DJ
785 if (ret)
786 return ret;
787
788 /* set up files for this cpu device */
5800043b
NZ
789 rcu_read_lock();
790 driver = rcu_dereference(cpufreq_driver);
791 drv_attr = driver->attr;
909a694e
DJ
792 while ((drv_attr) && (*drv_attr)) {
793 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
794 if (ret)
5800043b 795 goto err_out_unlock;
909a694e
DJ
796 drv_attr++;
797 }
5800043b 798 if (driver->get) {
909a694e
DJ
799 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
800 if (ret)
5800043b 801 goto err_out_unlock;
909a694e 802 }
5800043b 803 if (driver->target) {
909a694e
DJ
804 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
805 if (ret)
5800043b 806 goto err_out_unlock;
909a694e 807 }
5800043b 808 if (driver->bios_limit) {
e2f74f35
TR
809 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
810 if (ret)
5800043b 811 goto err_out_unlock;
e2f74f35 812 }
5800043b 813 rcu_read_unlock();
909a694e 814
0d1857a1 815 write_lock_irqsave(&cpufreq_driver_lock, flags);
909a694e 816 for_each_cpu(j, policy->cpus) {
909a694e 817 per_cpu(cpufreq_cpu_data, j) = policy;
f1625066 818 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
909a694e 819 }
0d1857a1 820 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
909a694e
DJ
821
822 ret = cpufreq_add_dev_symlink(cpu, policy);
ecf7e461
DJ
823 if (ret)
824 goto err_out_kobj_put;
825
826 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
827 /* assure that the starting sequence is run in __cpufreq_set_policy */
828 policy->governor = NULL;
829
830 /* set default policy */
831 ret = __cpufreq_set_policy(policy, &new_policy);
832 policy->user_policy.policy = policy->policy;
833 policy->user_policy.governor = policy->governor;
834
835 if (ret) {
5800043b
NZ
836 int (*exit)(struct cpufreq_policy *policy);
837
2d06d8c4 838 pr_debug("setting policy failed\n");
5800043b
NZ
839 rcu_read_lock();
840 exit = rcu_dereference(cpufreq_driver)->exit;
841 rcu_read_unlock();
842 if (exit)
843 exit(policy);
844
ecf7e461 845 }
909a694e
DJ
846 return ret;
847
5800043b
NZ
848err_out_unlock:
849 rcu_read_unlock();
909a694e
DJ
850err_out_kobj_put:
851 kobject_put(&policy->kobj);
852 wait_for_completion(&policy->kobj_unregister);
853 return ret;
854}
855
fcf80582
VK
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Attach a hotplugged @cpu to the existing policy of @sibling: stop the
 * governor, publish the CPU in the policy's tables, restart the governor,
 * and create the sysfs link.  The policy reference taken here backs the
 * link; it is dropped only if link creation fails.
 */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	unsigned long flags;
	int ret;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	__cpufreq_governor(policy, CPUFREQ_GOV_START);
	__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif
1da177e4
LT
892
893/**
894 * cpufreq_add_dev - add a CPU device
895 *
32ee8c3e 896 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
897 *
898 * The Oracle says: try running cpufreq registration/unregistration concurrently
899 * with with cpu hotplugging and all hell will break loose. Tried to clean this
900 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 901 */
8a25a2fd 902static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 903{
fcf80582 904 unsigned int j, cpu = dev->id;
65922465 905 int ret = -ENOMEM;
1da177e4 906 struct cpufreq_policy *policy;
5800043b
NZ
907 struct cpufreq_driver *driver;
908 int (*init)(struct cpufreq_policy *policy);
1da177e4 909 unsigned long flags;
90e41bac 910#ifdef CONFIG_HOTPLUG_CPU
fcf80582 911 struct cpufreq_governor *gov;
90e41bac
PB
912 int sibling;
913#endif
1da177e4 914
c32b6b8e
AR
915 if (cpu_is_offline(cpu))
916 return 0;
917
2d06d8c4 918 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
919
920#ifdef CONFIG_SMP
921 /* check whether a different CPU already registered this
922 * CPU because it is in the same boat. */
923 policy = cpufreq_cpu_get(cpu);
924 if (unlikely(policy)) {
8ff69732 925 cpufreq_cpu_put(policy);
1da177e4
LT
926 return 0;
927 }
fcf80582
VK
928
929#ifdef CONFIG_HOTPLUG_CPU
930 /* Check if this cpu was hot-unplugged earlier and has siblings */
0d1857a1 931 read_lock_irqsave(&cpufreq_driver_lock, flags);
fcf80582
VK
932 for_each_online_cpu(sibling) {
933 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
2eaa3e2d 934 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
0d1857a1 935 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 936 return cpufreq_add_policy_cpu(cpu, sibling, dev);
2eaa3e2d 937 }
fcf80582 938 }
0d1857a1 939 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 940#endif
1da177e4
LT
941#endif
942
5800043b
NZ
943 rcu_read_lock();
944 driver = rcu_dereference(cpufreq_driver);
945 if (!try_module_get(driver->owner)) {
946 rcu_read_unlock();
1da177e4
LT
947 ret = -EINVAL;
948 goto module_out;
949 }
5800043b
NZ
950 init = driver->init;
951 rcu_read_unlock();
1da177e4 952
e98df50c 953 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
059019a3 954 if (!policy)
1da177e4 955 goto nomem_out;
059019a3
DJ
956
957 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
3f4a782b 958 goto err_free_policy;
059019a3
DJ
959
960 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
3f4a782b 961 goto err_free_cpumask;
1da177e4
LT
962
963 policy->cpu = cpu;
65922465 964 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
835481d9 965 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 966
5a01f2e8 967 /* Initially set CPU itself as the policy_cpu */
f1625066 968 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
5a01f2e8 969
1da177e4 970 init_completion(&policy->kobj_unregister);
65f27f38 971 INIT_WORK(&policy->update, handle_update);
1da177e4
LT
972
973 /* call driver. From then on the cpufreq must be able
974 * to accept all calls to ->verify and ->setpolicy for this CPU
975 */
5800043b 976 ret = init(policy);
1da177e4 977 if (ret) {
2d06d8c4 978 pr_debug("initialization failed\n");
2eaa3e2d 979 goto err_set_policy_cpu;
1da177e4 980 }
643ae6e8 981
fcf80582
VK
982 /* related cpus should atleast have policy->cpus */
983 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
984
643ae6e8
VK
985 /*
986 * affected cpus must always be the one, which are online. We aren't
987 * managing offline cpus here.
988 */
989 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
990
187d9f4e
MC
991 policy->user_policy.min = policy->min;
992 policy->user_policy.max = policy->max;
1da177e4 993
a1531acd
TR
994 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
995 CPUFREQ_START, policy);
996
fcf80582
VK
997#ifdef CONFIG_HOTPLUG_CPU
998 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
999 if (gov) {
1000 policy->governor = gov;
1001 pr_debug("Restoring governor %s for cpu %d\n",
1002 policy->governor->name, cpu);
4bfa042c 1003 }
fcf80582 1004#endif
1da177e4 1005
8a25a2fd 1006 ret = cpufreq_add_dev_interface(cpu, policy, dev);
19d6f7ec
DJ
1007 if (ret)
1008 goto err_out_unregister;
8ff69732 1009
038c5b3e 1010 kobject_uevent(&policy->kobj, KOBJ_ADD);
5800043b
NZ
1011 rcu_read_lock();
1012 module_put(rcu_dereference(cpufreq_driver)->owner);
1013 rcu_read_unlock();
2d06d8c4 1014 pr_debug("initialization complete\n");
87c32271 1015
1da177e4
LT
1016 return 0;
1017
1da177e4 1018err_out_unregister:
0d1857a1 1019 write_lock_irqsave(&cpufreq_driver_lock, flags);
835481d9 1020 for_each_cpu(j, policy->cpus)
7a6aedfa 1021 per_cpu(cpufreq_cpu_data, j) = NULL;
0d1857a1 1022 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1023
c10997f6 1024 kobject_put(&policy->kobj);
1da177e4
LT
1025 wait_for_completion(&policy->kobj_unregister);
1026
2eaa3e2d
VK
1027err_set_policy_cpu:
1028 per_cpu(cpufreq_policy_cpu, cpu) = -1;
cad70a6a 1029 free_cpumask_var(policy->related_cpus);
3f4a782b
MD
1030err_free_cpumask:
1031 free_cpumask_var(policy->cpus);
1032err_free_policy:
1da177e4 1033 kfree(policy);
1da177e4 1034nomem_out:
5800043b
NZ
1035 rcu_read_lock();
1036 module_put(rcu_dereference(cpufreq_driver)->owner);
1037 rcu_read_unlock();
c32b6b8e 1038module_out:
1da177e4
LT
1039 return ret;
1040}
1041
b8eed8af
VK
1042static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1043{
1044 int j;
1045
1046 policy->last_cpu = policy->cpu;
1047 policy->cpu = cpu;
1048
3361b7b1 1049 for_each_cpu(j, policy->cpus)
b8eed8af 1050 per_cpu(cpufreq_policy_cpu, j) = cpu;
b8eed8af
VK
1051
1052#ifdef CONFIG_CPU_FREQ_TABLE
1053 cpufreq_frequency_table_update_policy_cpu(policy);
1054#endif
1055 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1056 CPUFREQ_UPDATE_POLICY_CPU, policy);
1057}
1da177e4
LT
1058
1059/**
5a01f2e8 1060 * __cpufreq_remove_dev - remove a CPU device
1da177e4
LT
1061 *
1062 * Removes the cpufreq interface for a CPU device.
5a01f2e8
VP
1063 * Caller should already have policy_rwsem in write mode for this CPU.
1064 * This routine frees the rwsem before returning.
1da177e4 1065 */
8a25a2fd 1066static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 1067{
b8eed8af 1068 unsigned int cpu = dev->id, ret, cpus;
1da177e4
LT
1069 unsigned long flags;
1070 struct cpufreq_policy *data;
5800043b 1071 struct cpufreq_driver *driver;
499bca9b
AW
1072 struct kobject *kobj;
1073 struct completion *cmp;
8a25a2fd 1074 struct device *cpu_dev;
5800043b
NZ
1075 bool has_target;
1076 int (*exit)(struct cpufreq_policy *policy);
1da177e4 1077
b8eed8af 1078 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1da177e4 1079
0d1857a1 1080 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 1081
7a6aedfa 1082 data = per_cpu(cpufreq_cpu_data, cpu);
2eaa3e2d
VK
1083 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1084
0d1857a1 1085 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1086
1087 if (!data) {
b8eed8af 1088 pr_debug("%s: No cpu_data found\n", __func__);
1da177e4
LT
1089 return -EINVAL;
1090 }
1da177e4 1091
5800043b
NZ
1092 rcu_read_lock();
1093 driver = rcu_dereference(cpufreq_driver);
1094 has_target = driver->target ? true : false;
1095 exit = driver->exit;
1096 if (has_target)
f6a7409c 1097 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1da177e4 1098
084f3493 1099#ifdef CONFIG_HOTPLUG_CPU
5800043b 1100 if (!driver->setpolicy)
fa69e33f
DB
1101 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1102 data->governor->name, CPUFREQ_NAME_LEN);
1da177e4 1103#endif
5800043b 1104 rcu_read_unlock();
1da177e4 1105
2eaa3e2d 1106 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af
VK
1107 cpus = cpumask_weight(data->cpus);
1108 cpumask_clear_cpu(cpu, data->cpus);
2eaa3e2d 1109 unlock_policy_rwsem_write(cpu);
084f3493 1110
73bf0fc2
VK
1111 if (cpu != data->cpu) {
1112 sysfs_remove_link(&dev->kobj, "cpufreq");
1113 } else if (cpus > 1) {
b8eed8af
VK
1114 /* first sibling now owns the new sysfs dir */
1115 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1116 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1117 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1118 if (ret) {
1119 pr_err("%s: Failed to move kobj: %d", __func__, ret);
084f3493 1120
2eaa3e2d 1121 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af 1122 cpumask_set_cpu(cpu, data->cpus);
1da177e4 1123
0d1857a1 1124 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 1125 per_cpu(cpufreq_cpu_data, cpu) = data;
0d1857a1 1126 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1127
499bca9b 1128 unlock_policy_rwsem_write(cpu);
1da177e4 1129
2eaa3e2d
VK
1130 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1131 "cpufreq");
b8eed8af 1132 return -EINVAL;
1da177e4 1133 }
5a01f2e8 1134
2eaa3e2d 1135 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af 1136 update_policy_cpu(data, cpu_dev->id);
2eaa3e2d 1137 unlock_policy_rwsem_write(cpu);
b8eed8af
VK
1138 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1139 __func__, cpu_dev->id, cpu);
1da177e4 1140 }
1da177e4 1141
b8eed8af
VK
1142 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1143 cpufreq_cpu_put(data);
1da177e4 1144
b8eed8af
VK
1145 /* If cpu is last user of policy, free policy */
1146 if (cpus == 1) {
7bd353a9
VK
1147 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1148
2eaa3e2d 1149 lock_policy_rwsem_read(cpu);
b8eed8af
VK
1150 kobj = &data->kobj;
1151 cmp = &data->kobj_unregister;
2eaa3e2d 1152 unlock_policy_rwsem_read(cpu);
b8eed8af 1153 kobject_put(kobj);
7d26e2d5 1154
b8eed8af
VK
1155 /* we need to make sure that the underlying kobj is actually
1156 * not referenced anymore by anybody before we proceed with
1157 * unloading.
1158 */
1159 pr_debug("waiting for dropping of refcount\n");
1160 wait_for_completion(cmp);
1161 pr_debug("wait complete\n");
7d26e2d5 1162
5800043b
NZ
1163 if (exit)
1164 exit(data);
27ecddc2 1165
b8eed8af
VK
1166 free_cpumask_var(data->related_cpus);
1167 free_cpumask_var(data->cpus);
1168 kfree(data);
5800043b 1169 } else if (has_target) {
b8eed8af
VK
1170 __cpufreq_governor(data, CPUFREQ_GOV_START);
1171 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
27ecddc2 1172 }
1da177e4 1173
2eaa3e2d 1174 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1da177e4
LT
1175 return 0;
1176}
1177
1178
8a25a2fd 1179static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1180{
8a25a2fd 1181 unsigned int cpu = dev->id;
5a01f2e8 1182 int retval;
ec28297a
VP
1183
1184 if (cpu_is_offline(cpu))
1185 return 0;
1186
8a25a2fd 1187 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1188 return retval;
1189}
1190
1191
65f27f38 1192static void handle_update(struct work_struct *work)
1da177e4 1193{
65f27f38
DH
1194 struct cpufreq_policy *policy =
1195 container_of(work, struct cpufreq_policy, update);
1196 unsigned int cpu = policy->cpu;
2d06d8c4 1197 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1198 cpufreq_update_policy(cpu);
1199}
1200
1201/**
1202 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1203 * @cpu: cpu number
1204 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1205 * @new_freq: CPU frequency the CPU actually runs at
1206 *
29464f28
DJ
1207 * We adjust to current frequency first, and need to clean up later.
1208 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1209 */
e08f5f5b
GS
1210static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1211 unsigned int new_freq)
1da177e4 1212{
b43a7ffb 1213 struct cpufreq_policy *policy;
1da177e4 1214 struct cpufreq_freqs freqs;
b43a7ffb
VK
1215 unsigned long flags;
1216
1da177e4 1217
2d06d8c4 1218 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1219 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1220
1da177e4
LT
1221 freqs.old = old_freq;
1222 freqs.new = new_freq;
b43a7ffb
VK
1223
1224 read_lock_irqsave(&cpufreq_driver_lock, flags);
1225 policy = per_cpu(cpufreq_cpu_data, cpu);
1226 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1227
1228 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1229 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1da177e4
LT
1230}
1231
1232
32ee8c3e 1233/**
4ab70df4 1234 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1235 * @cpu: CPU number
1236 *
1237 * This is the last known freq, without actually getting it from the driver.
1238 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1239 */
1240unsigned int cpufreq_quick_get(unsigned int cpu)
1241{
9e21ba8b 1242 struct cpufreq_policy *policy;
5800043b
NZ
1243 struct cpufreq_driver *driver;
1244 unsigned int (*get)(unsigned int cpu);
e08f5f5b 1245 unsigned int ret_freq = 0;
95235ca2 1246
5800043b
NZ
1247 rcu_read_lock();
1248 driver = rcu_dereference(cpufreq_driver);
1249 if (driver && driver->setpolicy && driver->get) {
1250 get = driver->get;
1251 rcu_read_unlock();
1252 return get(cpu);
1253 }
1254 rcu_read_unlock();
9e21ba8b
DB
1255
1256 policy = cpufreq_cpu_get(cpu);
95235ca2 1257 if (policy) {
e08f5f5b 1258 ret_freq = policy->cur;
95235ca2
VP
1259 cpufreq_cpu_put(policy);
1260 }
1261
4d34a67d 1262 return ret_freq;
95235ca2
VP
1263}
1264EXPORT_SYMBOL(cpufreq_quick_get);
1265
3d737108
JB
1266/**
1267 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1268 * @cpu: CPU number
1269 *
1270 * Just return the max possible frequency for a given CPU.
1271 */
1272unsigned int cpufreq_quick_get_max(unsigned int cpu)
1273{
1274 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1275 unsigned int ret_freq = 0;
1276
1277 if (policy) {
1278 ret_freq = policy->max;
1279 cpufreq_cpu_put(policy);
1280 }
1281
1282 return ret_freq;
1283}
1284EXPORT_SYMBOL(cpufreq_quick_get_max);
1285
95235ca2 1286
5a01f2e8 1287static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1288{
7a6aedfa 1289 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
5800043b
NZ
1290 struct cpufreq_driver *driver;
1291 unsigned int (*get)(unsigned int cpu);
e08f5f5b 1292 unsigned int ret_freq = 0;
5800043b
NZ
1293 u8 flags;
1294
1da177e4 1295
5800043b
NZ
1296 rcu_read_lock();
1297 driver = rcu_dereference(cpufreq_driver);
1298 if (!driver->get) {
1299 rcu_read_unlock();
4d34a67d 1300 return ret_freq;
5800043b
NZ
1301 }
1302 flags = driver->flags;
1303 get = driver->get;
1304 rcu_read_unlock();
1da177e4 1305
5800043b 1306 ret_freq = get(cpu);
1da177e4 1307
e08f5f5b 1308 if (ret_freq && policy->cur &&
5800043b 1309 !(flags & CPUFREQ_CONST_LOOPS)) {
e08f5f5b
GS
1310 /* verify no discrepancy between actual and
1311 saved value exists */
1312 if (unlikely(ret_freq != policy->cur)) {
1313 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1314 schedule_work(&policy->update);
1315 }
1316 }
1317
4d34a67d 1318 return ret_freq;
5a01f2e8 1319}
1da177e4 1320
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int freq = 0;

        if (!policy)
                return 0;

        /* only query the driver while holding the policy read lock */
        if (likely(!lock_policy_rwsem_read(cpu))) {
                freq = __cpufreq_get(cpu);
                unlock_policy_rwsem_read(cpu);
        }

        cpufreq_cpu_put(policy);
        return freq;
}
EXPORT_SYMBOL(cpufreq_get);
1348
/* Hooks cpufreq policy creation/removal into CPU subsystem device events. */
static struct subsys_interface cpufreq_interface = {
        .name           = "cpufreq",
        .subsys         = &cpu_subsys,
        .add_dev        = cpufreq_add_dev,
        .remove_dev     = cpufreq_remove_dev,
};
1355
1da177e4 1356
42d4dc3f 1357/**
e00e56df
RW
1358 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1359 *
1360 * This function is only executed for the boot processor. The other CPUs
1361 * have been put offline by means of CPU hotplug.
42d4dc3f 1362 */
e00e56df 1363static int cpufreq_bp_suspend(void)
42d4dc3f 1364{
5800043b 1365 int (*suspend)(struct cpufreq_policy *policy);
e08f5f5b 1366 int ret = 0;
4bc5d341 1367
e00e56df 1368 int cpu = smp_processor_id();
42d4dc3f
BH
1369 struct cpufreq_policy *cpu_policy;
1370
2d06d8c4 1371 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1372
e00e56df 1373 /* If there's no policy for the boot CPU, we have nothing to do. */
42d4dc3f
BH
1374 cpu_policy = cpufreq_cpu_get(cpu);
1375 if (!cpu_policy)
e00e56df 1376 return 0;
42d4dc3f 1377
5800043b
NZ
1378 rcu_read_lock();
1379 suspend = rcu_dereference(cpufreq_driver)->suspend;
1380 rcu_read_unlock();
1381 if (suspend) {
1382 ret = suspend(cpu_policy);
ce6c3997 1383 if (ret)
42d4dc3f
BH
1384 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1385 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1386 }
1387
42d4dc3f 1388 cpufreq_cpu_put(cpu_policy);
c9060494 1389 return ret;
42d4dc3f
BH
1390}
1391
1da177e4 1392/**
e00e56df 1393 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1394 *
1395 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1396 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1397 * restored. It will verify that the current freq is in sync with
1398 * what we believe it to be. This is a bit later than when it
1399 * should be, but nonethteless it's better than calling
1400 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1401 *
1402 * This function is only executed for the boot CPU. The other CPUs have not
1403 * been turned on yet.
1da177e4 1404 */
e00e56df 1405static void cpufreq_bp_resume(void)
1da177e4 1406{
e08f5f5b 1407 int ret = 0;
5800043b 1408 int (*resume)(struct cpufreq_policy *policy);
4bc5d341 1409
e00e56df 1410 int cpu = smp_processor_id();
1da177e4
LT
1411 struct cpufreq_policy *cpu_policy;
1412
2d06d8c4 1413 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1414
e00e56df 1415 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1416 cpu_policy = cpufreq_cpu_get(cpu);
1417 if (!cpu_policy)
e00e56df 1418 return;
1da177e4 1419
5800043b
NZ
1420 rcu_read_lock();
1421 resume = rcu_dereference(cpufreq_driver)->resume;
1422 rcu_read_unlock();
1423
1424 if (resume) {
1425 ret = resume(cpu_policy);
1da177e4
LT
1426 if (ret) {
1427 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1428 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1429 goto fail;
1da177e4
LT
1430 }
1431 }
1432
1da177e4 1433 schedule_work(&cpu_policy->update);
ce6c3997 1434
c9060494 1435fail:
1da177e4 1436 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1437}
1438
/* Syscore hooks: runs on the boot CPU only, around system suspend/resume. */
static struct syscore_ops cpufreq_syscore_ops = {
        .suspend        = cpufreq_bp_suspend,
        .resume         = cpufreq_bp_resume,
};
1443
9d95046e
BP
1444/**
1445 * cpufreq_get_current_driver - return current driver's name
1446 *
1447 * Return the name string of the currently loaded cpufreq driver
1448 * or NULL, if none.
1449 */
1450const char *cpufreq_get_current_driver(void)
1451{
5800043b
NZ
1452 struct cpufreq_driver *driver;
1453 const char *name = NULL;
1454 rcu_read_lock();
1455 driver = rcu_dereference(cpufreq_driver);
1456 if (driver)
1457 name = driver->name;
1458 rcu_read_unlock();
1459 return name;
9d95046e
BP
1460}
1461EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1462
1463/*********************************************************************
1464 * NOTIFIER LISTS INTERFACE *
1465 *********************************************************************/
1466
1467/**
1468 * cpufreq_register_notifier - register a driver with cpufreq
1469 * @nb: notifier function to register
1470 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1471 *
32ee8c3e 1472 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1473 * are notified about clock rate changes (once before and once after
1474 * the transition), or a list of drivers that are notified about
1475 * changes in cpufreq policy.
1476 *
1477 * This function may sleep, and has the same return conditions as
e041c683 1478 * blocking_notifier_chain_register.
1da177e4
LT
1479 */
1480int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1481{
1482 int ret;
1483
d5aaffa9
DB
1484 if (cpufreq_disabled())
1485 return -EINVAL;
1486
74212ca4
CEB
1487 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1488
1da177e4
LT
1489 switch (list) {
1490 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1491 ret = srcu_notifier_chain_register(
e041c683 1492 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1493 break;
1494 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1495 ret = blocking_notifier_chain_register(
1496 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1497 break;
1498 default:
1499 ret = -EINVAL;
1500 }
1da177e4
LT
1501
1502 return ret;
1503}
1504EXPORT_SYMBOL(cpufreq_register_notifier);
1505
1506
1507/**
1508 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1509 * @nb: notifier block to be unregistered
1510 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1511 *
1512 * Remove a driver from the CPU frequency notifier list.
1513 *
1514 * This function may sleep, and has the same return conditions as
e041c683 1515 * blocking_notifier_chain_unregister.
1da177e4
LT
1516 */
1517int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1518{
1519 int ret;
1520
d5aaffa9
DB
1521 if (cpufreq_disabled())
1522 return -EINVAL;
1523
1da177e4
LT
1524 switch (list) {
1525 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1526 ret = srcu_notifier_chain_unregister(
e041c683 1527 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1528 break;
1529 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1530 ret = blocking_notifier_chain_unregister(
1531 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1532 break;
1533 default:
1534 ret = -EINVAL;
1535 }
1da177e4
LT
1536
1537 return ret;
1538}
1539EXPORT_SYMBOL(cpufreq_unregister_notifier);
1540
1541
1542/*********************************************************************
1543 * GOVERNORS *
1544 *********************************************************************/
1545
1546
1547int __cpufreq_driver_target(struct cpufreq_policy *policy,
1548 unsigned int target_freq,
1549 unsigned int relation)
1550{
1551 int retval = -EINVAL;
7249924e 1552 unsigned int old_target_freq = target_freq;
5800043b
NZ
1553 int (*target)(struct cpufreq_policy *policy,
1554 unsigned int target_freq,
1555 unsigned int relation);
c32b6b8e 1556
a7b422cd
KRW
1557 if (cpufreq_disabled())
1558 return -ENODEV;
1559
7249924e
VK
1560 /* Make sure that target_freq is within supported range */
1561 if (target_freq > policy->max)
1562 target_freq = policy->max;
1563 if (target_freq < policy->min)
1564 target_freq = policy->min;
1565
1566 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1567 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1568
1569 if (target_freq == policy->cur)
1570 return 0;
1571
5800043b
NZ
1572 rcu_read_lock();
1573 target = rcu_dereference(cpufreq_driver)->target;
1574 rcu_read_unlock();
1575 if (target)
1576 retval = target(policy, target_freq, relation);
90d45d17 1577
1da177e4
LT
1578 return retval;
1579}
1580EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1581
1da177e4
LT
1582int cpufreq_driver_target(struct cpufreq_policy *policy,
1583 unsigned int target_freq,
1584 unsigned int relation)
1585{
f1829e4a 1586 int ret = -EINVAL;
1da177e4
LT
1587
1588 policy = cpufreq_cpu_get(policy->cpu);
1589 if (!policy)
f1829e4a 1590 goto no_policy;
1da177e4 1591
5a01f2e8 1592 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1593 goto fail;
1da177e4
LT
1594
1595 ret = __cpufreq_driver_target(policy, target_freq, relation);
1596
5a01f2e8 1597 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1598
f1829e4a 1599fail:
1da177e4 1600 cpufreq_cpu_put(policy);
f1829e4a 1601no_policy:
1da177e4
LT
1602 return ret;
1603}
1604EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1605
bf0b90e3 1606int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62
VP
1607{
1608 int ret = 0;
5800043b
NZ
1609 unsigned int (*getavg)(struct cpufreq_policy *policy,
1610 unsigned int cpu);
dfde5d62 1611
d5aaffa9
DB
1612 if (cpufreq_disabled())
1613 return ret;
1614
5800043b
NZ
1615 rcu_read_lock();
1616 getavg = rcu_dereference(cpufreq_driver)->getavg;
1617 rcu_read_unlock();
1618
1619 if (!getavg)
0676f7f2
VK
1620 return 0;
1621
dfde5d62
VP
1622 policy = cpufreq_cpu_get(policy->cpu);
1623 if (!policy)
1624 return -EINVAL;
1625
5800043b 1626 ret = getavg(policy, cpu);
dfde5d62 1627
dfde5d62
VP
1628 cpufreq_cpu_put(policy);
1629 return ret;
1630}
5a01f2e8 1631EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1632
/*
 * __cpufreq_governor - deliver a governor event for @policy
 *
 * Forwards @event (GOV_START/STOP/LIMITS/POLICY_INIT/POLICY_EXIT) to the
 * policy's governor, falling back to the performance governor if the
 * hardware's transition latency exceeds what the governor can handle.
 * Also maintains the governor module refcount: one reference is kept per
 * CPU the governor currently manages (taken on successful GOV_START,
 * dropped on successful GOV_STOP).
 *
 * when "event" is CPUFREQ_GOV_LIMITS
 */

static int __cpufreq_governor(struct cpufreq_policy *policy,
                                                unsigned int event)
{
        int ret;

        /* Only must be defined when default governor is known to have latency
           restrictions, like e.g. conservative or ondemand.
           That this is the case is already ensured in Kconfig
        */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
        struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
        struct cpufreq_governor *gov = NULL;
#endif

        /* governor can't cope with this hardware's transition latency:
         * fall back to the performance governor if we have one */
        if (policy->governor->max_transition_latency &&
            policy->cpuinfo.transition_latency >
            policy->governor->max_transition_latency) {
                if (!gov)
                        return -EINVAL;
                else {
                        printk(KERN_WARNING "%s governor failed, too long"
                               " transition latency of HW, fallback"
                               " to %s governor\n",
                               policy->governor->name,
                               gov->name);
                        policy->governor = gov;
                }
        }

        if (!try_module_get(policy->governor->owner))
                return -EINVAL;

        pr_debug("__cpufreq_governor for CPU %u, event %u\n",
                                                policy->cpu, event);
        ret = policy->governor->governor(policy, event);

        /* track how many policies have this governor initialized */
        if (!ret) {
                if (event == CPUFREQ_GOV_POLICY_INIT)
                        policy->governor->initialized++;
                else if (event == CPUFREQ_GOV_POLICY_EXIT)
                        policy->governor->initialized--;
        }

        /* we keep one module reference alive for
                        each CPU governed by this CPU */
        if ((event != CPUFREQ_GOV_START) || ret)
                module_put(policy->governor->owner);
        if ((event == CPUFREQ_GOV_STOP) && !ret)
                module_put(policy->governor->owner);

        return ret;
}
1690
1691
1da177e4
LT
1692int cpufreq_register_governor(struct cpufreq_governor *governor)
1693{
3bcb09a3 1694 int err;
1da177e4
LT
1695
1696 if (!governor)
1697 return -EINVAL;
1698
a7b422cd
KRW
1699 if (cpufreq_disabled())
1700 return -ENODEV;
1701
3fc54d37 1702 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1703
b394058f 1704 governor->initialized = 0;
3bcb09a3
JF
1705 err = -EBUSY;
1706 if (__find_governor(governor->name) == NULL) {
1707 err = 0;
1708 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1709 }
1da177e4 1710
32ee8c3e 1711 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1712 return err;
1da177e4
LT
1713}
1714EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1715
1716
1717void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1718{
90e41bac
PB
1719#ifdef CONFIG_HOTPLUG_CPU
1720 int cpu;
1721#endif
1722
1da177e4
LT
1723 if (!governor)
1724 return;
1725
a7b422cd
KRW
1726 if (cpufreq_disabled())
1727 return;
1728
90e41bac
PB
1729#ifdef CONFIG_HOTPLUG_CPU
1730 for_each_present_cpu(cpu) {
1731 if (cpu_online(cpu))
1732 continue;
1733 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1734 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1735 }
1736#endif
1737
3fc54d37 1738 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1739 list_del(&governor->governor_list);
3fc54d37 1740 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1741 return;
1742}
1743EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1744
1745
1746
1747/*********************************************************************
1748 * POLICY INTERFACE *
1749 *********************************************************************/
1750
1751/**
1752 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1753 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1754 * is written
1da177e4
LT
1755 *
1756 * Reads the current cpufreq policy.
1757 */
1758int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1759{
1760 struct cpufreq_policy *cpu_policy;
1761 if (!policy)
1762 return -EINVAL;
1763
1764 cpu_policy = cpufreq_cpu_get(cpu);
1765 if (!cpu_policy)
1766 return -EINVAL;
1767
1da177e4 1768 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1769
1770 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1771 return 0;
1772}
1773EXPORT_SYMBOL(cpufreq_get_policy);
1774
1775
/*
 * __cpufreq_set_policy - apply a new policy to a CPU
 * data   : current policy.
 * policy : policy to be set.
 *
 * Validates the requested limits against the driver (twice: once before
 * and once after policy notifiers may have adjusted them), publishes the
 * new limits, and either calls the driver's ->setpolicy or performs a
 * governor switch.  A failed switch is rolled back to the old governor.
 * Caller must hold the policy rwsem in write mode.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
                                struct cpufreq_policy *policy)
{
        int ret = 0, failed = 1;
        struct cpufreq_driver *driver;
        int (*verify)(struct cpufreq_policy *policy);
        int (*setpolicy)(struct cpufreq_policy *policy);

        pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
                policy->min, policy->max);

        memcpy(&policy->cpuinfo, &data->cpuinfo,
                                sizeof(struct cpufreq_cpuinfo));

        /* reject ranges that don't overlap the currently valid range */
        if (policy->min > data->max || policy->max < data->min) {
                ret = -EINVAL;
                goto error_out;
        }

        /* verify the cpu speed can be set within this limit */
        rcu_read_lock();
        driver = rcu_dereference(cpufreq_driver);
        verify = driver->verify;
        setpolicy = driver->setpolicy;
        rcu_read_unlock();

        ret = verify(policy);
        if (ret)
                goto error_out;

        /* adjust if necessary - all reasons */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_ADJUST, policy);

        /* adjust if necessary - hardware incompatibility*/
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_INCOMPATIBLE, policy);

        /* verify the cpu speed can be set within this limit,
           which might be different to the first one */
        ret = verify(policy);
        if (ret)
                goto error_out;

        /* notification of the new policy */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_NOTIFY, policy);

        data->min = policy->min;
        data->max = policy->max;

        pr_debug("new min and max freqs are %u - %u kHz\n",
                                        data->min, data->max);

        if (setpolicy) {
                /* drivers with ->setpolicy manage frequency themselves */
                data->policy = policy->policy;
                pr_debug("setting range\n");
                ret = setpolicy(policy);
        } else {
                if (policy->governor != data->governor) {
                        /* save old, working values */
                        struct cpufreq_governor *old_gov = data->governor;

                        pr_debug("governor switch\n");

                        /* end old governor */
                        if (data->governor) {
                                __cpufreq_governor(data, CPUFREQ_GOV_STOP);
                                __cpufreq_governor(data,
                                                CPUFREQ_GOV_POLICY_EXIT);
                        }

                        /* start new governor */
                        data->governor = policy->governor;
                        if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
                                if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
                                        failed = 0;
                                else
                                        __cpufreq_governor(data,
                                                        CPUFREQ_GOV_POLICY_EXIT);
                        }

                        if (failed) {
                                /* new governor failed, so re-start old one */
                                pr_debug("starting governor %s failed\n",
                                                        data->governor->name);
                                if (old_gov) {
                                        data->governor = old_gov;
                                        __cpufreq_governor(data,
                                                        CPUFREQ_GOV_POLICY_INIT);
                                        __cpufreq_governor(data,
                                                        CPUFREQ_GOV_START);
                                }
                                ret = -EINVAL;
                                goto error_out;
                        }
                        /* might be a policy change, too, so fall through */
                }
                pr_debug("governor: change or update limits\n");
                __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
        }

error_out:
        return ret;
}
1885
1da177e4
LT
1886/**
1887 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1888 * @cpu: CPU which shall be re-evaluated
1889 *
25985edc 1890 * Useful for policy notifiers which have different necessities
1da177e4
LT
1891 * at different times.
1892 */
1893int cpufreq_update_policy(unsigned int cpu)
1894{
1895 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1896 struct cpufreq_policy policy;
5800043b
NZ
1897 struct cpufreq_driver *driver;
1898 unsigned int (*get)(unsigned int cpu);
1899 int (*target)(struct cpufreq_policy *policy,
1900 unsigned int target_freq,
1901 unsigned int relation);
f1829e4a 1902 int ret;
1da177e4 1903
f1829e4a
JL
1904 if (!data) {
1905 ret = -ENODEV;
1906 goto no_policy;
1907 }
1da177e4 1908
f1829e4a
JL
1909 if (unlikely(lock_policy_rwsem_write(cpu))) {
1910 ret = -EINVAL;
1911 goto fail;
1912 }
1da177e4 1913
2d06d8c4 1914 pr_debug("updating policy for CPU %u\n", cpu);
7d5e350f 1915 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1da177e4
LT
1916 policy.min = data->user_policy.min;
1917 policy.max = data->user_policy.max;
1918 policy.policy = data->user_policy.policy;
1919 policy.governor = data->user_policy.governor;
1920
0961dd0d
TR
1921 /* BIOS might change freq behind our back
1922 -> ask driver for current freq and notify governors about a change */
5800043b
NZ
1923 rcu_read_lock();
1924 driver = rcu_access_pointer(cpufreq_driver);
1925 get = driver->get;
1926 target = driver->target;
1927 rcu_read_unlock();
1928 if (get) {
1929 policy.cur = get(cpu);
a85f7bd3 1930 if (!data->cur) {
2d06d8c4 1931 pr_debug("Driver did not initialize current freq");
a85f7bd3
TR
1932 data->cur = policy.cur;
1933 } else {
5800043b 1934 if (data->cur != policy.cur && target)
e08f5f5b
GS
1935 cpufreq_out_of_sync(cpu, data->cur,
1936 policy.cur);
a85f7bd3 1937 }
0961dd0d
TR
1938 }
1939
1da177e4
LT
1940 ret = __cpufreq_set_policy(data, &policy);
1941
5a01f2e8
VP
1942 unlock_policy_rwsem_write(cpu);
1943
f1829e4a 1944fail:
1da177e4 1945 cpufreq_cpu_put(data);
f1829e4a 1946no_policy:
1da177e4
LT
1947 return ret;
1948}
1949EXPORT_SYMBOL(cpufreq_update_policy);
1950
dd184a01 1951static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1952 unsigned long action, void *hcpu)
1953{
1954 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1955 struct device *dev;
c32b6b8e 1956
8a25a2fd
KS
1957 dev = get_cpu_device(cpu);
1958 if (dev) {
c32b6b8e
AR
1959 switch (action) {
1960 case CPU_ONLINE:
8bb78442 1961 case CPU_ONLINE_FROZEN:
8a25a2fd 1962 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1963 break;
1964 case CPU_DOWN_PREPARE:
8bb78442 1965 case CPU_DOWN_PREPARE_FROZEN:
8a25a2fd 1966 __cpufreq_remove_dev(dev, NULL);
c32b6b8e 1967 break;
5a01f2e8 1968 case CPU_DOWN_FAILED:
8bb78442 1969 case CPU_DOWN_FAILED_FROZEN:
8a25a2fd 1970 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1971 break;
1972 }
1973 }
1974 return NOTIFY_OK;
1975}
1976
/*
 * Hotplug notifier block; __refdata because it references the
 * __cpuinit callback above and must survive section mismatch checks.
 */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
1980
1981/*********************************************************************
1982 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1983 *********************************************************************/
1984
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* A driver must provide ->verify, ->init, and at least one of the
	 * two frequency-change interfaces (->setpolicy or ->target). */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* setpolicy drivers manage frequency themselves, so loops_per_jiffy
	 * must be treated as constant by the rest of the kernel. */
	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* Publish the driver pointer; only one driver may be registered. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (rcu_access_pointer(cpufreq_driver)) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	rcu_assign_pointer(cpufreq_driver, driver_data);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	synchronize_rcu();

	/* This triggers ->init() on each present CPU via the subsys
	 * interface's add_dev callback (cpufreq_add_dev). */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(driver_data->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
	/* Error unwinding mirrors the setup order above, newest first. */
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	rcu_assign_pointer(cpufreq_driver, NULL);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	synchronize_rcu();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2058
2059
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;
	struct cpufreq_driver *old_driver;

	/* Only the currently registered driver may be unregistered.
	 * NOTE(review): the check is done under rcu_read_lock() but the
	 * pointer is cleared later under the write lock, leaving a narrow
	 * window where two concurrent unregister calls could both pass
	 * this test — confirm callers serialize unregistration. */
	rcu_read_lock();
	old_driver = rcu_access_pointer(cpufreq_driver);
	if (!old_driver || (driver != old_driver)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();

	pr_debug("unregistering driver %s\n", driver->name);

	/* Tears down per-CPU state via the subsys remove callback, then
	 * stops reacting to CPU hotplug events. */
	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	/* Clear the published driver pointer and wait for all RCU readers
	 * still using the old driver to drain before returning. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	rcu_assign_pointer(cpufreq_driver, NULL);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	synchronize_rcu();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2094
2095static int __init cpufreq_core_init(void)
2096{
2097 int cpu;
2098
a7b422cd
KRW
2099 if (cpufreq_disabled())
2100 return -ENODEV;
2101
5a01f2e8 2102 for_each_possible_cpu(cpu) {
f1625066 2103 per_cpu(cpufreq_policy_cpu, cpu) = -1;
5a01f2e8
VP
2104 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2105 }
8aa84ad8 2106
8a25a2fd 2107 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
8aa84ad8 2108 BUG_ON(!cpufreq_global_kobject);
e00e56df 2109 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 2110
5a01f2e8
VP
2111 return 0;
2112}
5a01f2e8 2113core_initcall(cpufreq_core_init);
This page took 0.876069 seconds and 5 git commands to generate.