[CPUFREQ] cpufreq_stats.c: Fixed brace coding style issue
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
1da177e4
LT
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/notifier.h>
22#include <linux/cpufreq.h>
23#include <linux/delay.h>
24#include <linux/interrupt.h>
25#include <linux/spinlock.h>
26#include <linux/device.h>
27#include <linux/slab.h>
28#include <linux/cpu.h>
29#include <linux/completion.h>
3fc54d37 30#include <linux/mutex.h>
e00e56df 31#include <linux/syscore_ops.h>
1da177e4 32
6f4f2723
TR
33#include <trace/events/power.h>
34
1da177e4 35/**
cd878479 36 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
37 * level driver of CPUFreq support, and its spinlock. This lock
38 * also protects the cpufreq_cpu_data array.
39 */
7d5e350f 40static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 41static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
42#ifdef CONFIG_HOTPLUG_CPU
43/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 44static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 45#endif
1da177e4
LT
46static DEFINE_SPINLOCK(cpufreq_driver_lock);
47
5a01f2e8
VP
48/*
49 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
50 * all cpufreq/hotplug/workqueue/etc related lock issues.
51 *
52 * The rules for this semaphore:
53 * - Any routine that wants to read from the policy structure will
54 * do a down_read on this semaphore.
55 * - Any routine that will write to the policy structure and/or may take away
56 * the policy altogether (eg. CPU hotplug), will hold this lock in write
57 * mode before doing so.
58 *
59 * Additional rules:
60 * - All holders of the lock should check to make sure that the CPU they
61 * are concerned with are online after they get the lock.
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 66 */
f1625066 67static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
68static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69
/*
 * lock_policy_rwsem_read()/lock_policy_rwsem_write() - take the rwsem of
 * the policy that owns @cpu, in the given mode.
 *
 * The owning CPU is looked up via cpufreq_policy_cpu, so all CPUs sharing
 * a policy contend on the same semaphore.  Returns 0 with the lock held;
 * returns -1 with the lock already released if @cpu went offline between
 * the lookup and the check.
 */
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode					\
(int cpu)								\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
	if (unlikely(!cpu_online(cpu))) {				\
		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
		return -1;						\
	}								\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);

lock_policy_rwsem(write, cpu);
5a01f2e8 88
226528c6 89static void unlock_policy_rwsem_read(int cpu)
5a01f2e8 90{
f1625066 91 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
5a01f2e8
VP
92 BUG_ON(policy_cpu == -1);
93 up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
94}
5a01f2e8 95
226528c6 96static void unlock_policy_rwsem_write(int cpu)
5a01f2e8 97{
f1625066 98 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
5a01f2e8
VP
99 BUG_ON(policy_cpu == -1);
100 up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
101}
5a01f2e8
VP
102
103
1da177e4 104/* internal prototypes */
29464f28
DJ
105static int __cpufreq_governor(struct cpufreq_policy *policy,
106 unsigned int event);
5a01f2e8 107static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 108static void handle_update(struct work_struct *work);
1da177e4
LT
109
110/**
32ee8c3e
DJ
111 * Two notifier lists: the "policy" list is involved in the
112 * validation process for a new CPU frequency policy; the
1da177e4
LT
113 * "transition" list for kernel code that needs to handle
114 * changes to devices when the CPU clock speed changes.
115 * The mutex locks both lists.
116 */
e041c683 117static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 118static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 119
/* Set once the SRCU head below has been initialized; checked by
 * cpufreq_register_notifier() callers to catch too-early registration. */
static bool init_cpufreq_transition_notifier_list_called;

/*
 * SRCU notifier heads cannot be statically initialized, so set up the
 * transition list at early boot (pure_initcall runs before any driver
 * can register a notifier).
 */
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4
LT
128
129static LIST_HEAD(cpufreq_governor_list);
29464f28 130static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 131
/**
 * cpufreq_cpu_get - acquire a reference to the policy for @cpu
 *
 * Under cpufreq_driver_lock, takes a reference on the cpufreq driver
 * module and on the policy's kobject.  Returns the policy, or NULL if
 * no driver is registered, @cpu has no policy, or a reference could not
 * be obtained.  Release with cpufreq_cpu_put().
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	/* pin the driver module so it cannot unload while in use */
	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU's policy */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	/* kobject_get() fails if the kobject is being torn down */
	if (!kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

	/* unwind in reverse acquisition order */
err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
170
7d5e350f 171
1da177e4
LT
/**
 * cpufreq_cpu_put - release the references taken by cpufreq_cpu_get()
 *
 * Drops the policy kobject reference first, then the driver module
 * reference (reverse of the order they were taken).
 */
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
178
179
1da177e4
LT
180/*********************************************************************
181 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
182 *********************************************************************/
183
184/**
185 * adjust_jiffies - adjust the system "loops_per_jiffy"
186 *
187 * This function alters the system "loops_per_jiffy" for the clock
188 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 189 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
190 * per-CPU loops_per_jiffy value wherever possible.
191 */
#ifndef CONFIG_SMP
/* Reference loops_per_jiffy and the frequency it was calibrated at,
 * captured on the first transition. */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

/*
 * Rescale loops_per_jiffy for the new frequency on UP systems.  Skipped
 * entirely when the driver declares CPUFREQ_CONST_LOOPS (delay loops do
 * not scale with frequency).
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	/* first call: remember the calibration reference point */
	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	/* Scale up before the frequency rises (PRECHANGE) and down after
	 * it drops (POSTCHANGE), so udelay() never runs too short;
	 * suspend/resume transitions always rescale. */
	if ((val == CPUFREQ_PRECHANGE  && ci->old < ci->new) ||
	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: each CPU may scale differently, so the global value is untouched. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
222
223
224/**
e4472cb3
DJ
225 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
226 * on frequency transition.
1da177e4 227 *
e4472cb3
DJ
228 * This function calls the transition notifiers and the "adjust_jiffies"
229 * function. It is called twice on all CPU frequency changes that have
32ee8c3e 230 * external effects.
1da177e4
LT
231 */
/*
 * Called twice per transition (PRECHANGE then POSTCHANGE): runs the SRCU
 * transition notifier chain, rescales loops_per_jiffy, and keeps
 * policy->cur in sync with the hardware.  Must not be called with IRQs
 * disabled (notifiers may sleep).
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;

	BUG_ON(irqs_disabled());

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				/* trust the core's view of the old freq */
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		/* both the legacy power event and the newer cpu_frequency
		 * tracepoint are emitted here */
		trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* record the completed transition in the policy */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
278
279
280
281/*********************************************************************
282 * SYSFS INTERFACE *
283 *********************************************************************/
284
3bcb09a3
JF
285static struct cpufreq_governor *__find_governor(const char *str_governor)
286{
287 struct cpufreq_governor *t;
288
289 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 290 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
291 return t;
292
293 return NULL;
294}
295
1da177e4
LT
296/**
297 * cpufreq_parse_governor - parse a governor string
298 */
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers the string selects a static policy
 * ("performance"/"powersave") returned via @policy; for target drivers
 * it names a governor returned via @governor, loading the
 * "cpufreq_<name>" module on demand if it is not yet registered.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
								str_governor);

			if (name) {
				int ret;

				/* request_module() may sleep and the new
				 * module registers its governor under the
				 * mutex, so drop it across the call */
				mutex_unlock(&cpufreq_governor_mutex);
				ret = request_module("%s", name);
				mutex_lock(&cpufreq_governor_mutex);

				/* retry the lookup now the module is in */
				if (ret == 0)
					t = __find_governor(str_governor);
			}

			kfree(name);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4
LT
351
352
1da177e4 353/**
e08f5f5b
GS
354 * cpufreq_per_cpu_attr_read() / show_##file_name() -
355 * print out cpufreq information
1da177e4
LT
356 *
357 * Write out information from cpufreq_driver->policy[cpu]; object must be
358 * "unsigned int".
359 */
360
32ee8c3e
DJ
/*
 * show_one() - generate a sysfs show routine printing a single unsigned
 * int member of struct cpufreq_policy, newline-terminated.
 */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
374
e08f5f5b
GS
375static int __cpufreq_set_policy(struct cpufreq_policy *data,
376 struct cpufreq_policy *policy);
7970e08b 377
1da177e4
LT
378/**
379 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
380 */
/*
 * store_one() - generate a sysfs store routine that parses one unsigned
 * int, applies it through __cpufreq_set_policy(), and mirrors the
 * resulting value into user_policy.  Returns @count on success.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret = -EINVAL;					\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
1da177e4
LT
404
405/**
406 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
407 */
905d77cd
DJ
408static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
409 char *buf)
1da177e4 410{
5a01f2e8 411 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
412 if (!cur_freq)
413 return sprintf(buf, "<unknown>");
414 return sprintf(buf, "%u\n", cur_freq);
415}
416
417
418/**
419 * show_scaling_governor - show the current policy for the specified CPU
420 */
905d77cd 421static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 422{
29464f28 423 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
424 return sprintf(buf, "powersave\n");
425 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
426 return sprintf(buf, "performance\n");
427 else if (policy->governor)
29464f28
DJ
428 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
429 policy->governor->name);
1da177e4
LT
430 return -EINVAL;
431}
432
433
434/**
435 * store_scaling_governor - store policy for the specified CPU
436 */
905d77cd
DJ
/*
 * store_scaling_governor - store policy for the specified CPU.
 * Parses the governor name from @buf, resolves it (loading the module if
 * needed), and applies it.  On success the applied policy/governor are
 * mirrored into user_policy.  Returns @count or a negative errno.
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret = -EINVAL;
	char	str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	/* %15s: bounded read, leaves room for the NUL terminator */
	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
468
469/**
470 * show_scaling_driver - show the cpufreq driver currently loaded
471 */
/*
 * show_scaling_driver - show the name of the cpufreq driver currently
 * loaded, truncated to CPUFREQ_NAME_LEN.
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}
476
477/**
478 * show_scaling_available_governors - show the available CPUfreq governors
479 */
905d77cd
DJ
/*
 * show_scaling_available_governors - show the available CPUfreq governors.
 * setpolicy drivers offer only the two static policies; target drivers
 * list every registered governor, space-separated.
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		/* stop early if another full name + separator might not
		 * fit in the remaining page */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 501
835481d9 502static ssize_t show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
503{
504 ssize_t i = 0;
505 unsigned int cpu;
506
835481d9 507 for_each_cpu(cpu, mask) {
1da177e4
LT
508 if (i)
509 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
510 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
511 if (i >= (PAGE_SIZE - 5))
29464f28 512 break;
1da177e4
LT
513 }
514 i += sprintf(&buf[i], "\n");
515 return i;
516}
517
e8628dd0
DW
518/**
519 * show_related_cpus - show the CPUs affected by each transition even if
520 * hw coordination is in use
521 */
522static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
523{
835481d9 524 if (cpumask_empty(policy->related_cpus))
e8628dd0
DW
525 return show_cpus(policy->cpus, buf);
526 return show_cpus(policy->related_cpus, buf);
527}
528
529/**
530 * show_affected_cpus - show the CPUs affected by each transition
531 */
/*
 * show_affected_cpus - show the CPUs affected by each transition
 * (policy->cpus only, i.e. CPUs under kernel software coordination).
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}
536
9e76988e 537static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 538 const char *buf, size_t count)
9e76988e
VP
539{
540 unsigned int freq = 0;
541 unsigned int ret;
542
879000f9 543 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
544 return -EINVAL;
545
546 ret = sscanf(buf, "%u", &freq);
547 if (ret != 1)
548 return -EINVAL;
549
550 policy->governor->store_setspeed(policy, freq);
551
552 return count;
553}
554
555static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
556{
879000f9 557 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
558 return sprintf(buf, "<unsupported>\n");
559
560 return policy->governor->show_setspeed(policy, buf);
561}
1da177e4 562
e2f74f35
TR
563/**
564 * show_scaling_driver - show the current cpufreq HW/BIOS limitation
565 */
566static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
567{
568 unsigned int limit;
569 int ret;
570 if (cpufreq_driver->bios_limit) {
571 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
572 if (!ret)
573 return sprintf(buf, "%u\n", limit);
574 }
575 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
576}
577
6dad2a29
BP
/* sysfs attribute objects for the per-policy cpufreq directory;
 * cpuinfo_cur_freq is root-readable only (0400) since it may query HW. */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

/* Attributes created for every policy kobject.  cpuinfo_cur_freq,
 * scaling_cur_freq, and bios_limit are added conditionally in
 * cpufreq_add_dev_interface() based on driver capabilities. */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
607
8aa84ad8
TR
608struct kobject *cpufreq_global_kobject;
609EXPORT_SYMBOL(cpufreq_global_kobject);
610
29464f28
DJ
611#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
612#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 613
/*
 * Generic sysfs show dispatcher: pins the policy, takes the policy rwsem
 * for reading, and forwards to the attribute's ->show hook.  Unwinds the
 * reference/lock in reverse order on any failure.
 */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	/* re-get through cpufreq_cpu_get() to hold proper references */
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
637
905d77cd
DJ
/*
 * Generic sysfs store dispatcher: mirror of show(), but takes the policy
 * rwsem in write mode before calling the attribute's ->store hook.
 */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	/* re-get through cpufreq_cpu_get() to hold proper references */
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
662
/*
 * kobject release callback: signals kobj_unregister so the teardown path
 * waiting in wait_for_completion() can free the policy safely.
 */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
669
/* sysfs ops routing all attribute I/O through the locking dispatchers. */
static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

/* kobject type for per-policy directories under .../cpuN/cpufreq */
static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
680
4bfa042c
TR
/*
 * Returns:
 *   Negative: Failure
 *   0:        Success
 *   Positive: When we have a managed CPU and the sysfs got symlinked
 */
static int cpufreq_add_dev_policy(unsigned int cpu,
				  struct cpufreq_policy *policy,
				  struct sys_device *sys_dev)
{
	int ret = 0;
#ifdef CONFIG_SMP
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;

	/* restore the governor this CPU used before it was unplugged */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;

		if (cpu == j)
			continue;

		/* Check for existing affected CPUs.
		 * They may not be aware of it due to CPU Hotplug.
		 * cpufreq_cpu_put is called when the device is removed
		 * in __cpufreq_remove_dev()
		 */
		managed_policy = cpufreq_cpu_get(j);
		if (unlikely(managed_policy)) {

			/* Set proper policy_cpu: drop our own rwsem,
			 * repoint policy_cpu at the managing CPU, then
			 * take the (now shared) rwsem again */
			unlock_policy_rwsem_write(cpu);
			per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;

			if (lock_policy_rwsem_write(cpu) < 0) {
				/* Should not go through policy unlock path */
				if (cpufreq_driver->exit)
					cpufreq_driver->exit(policy);
				cpufreq_cpu_put(managed_policy);
				return -EBUSY;
			}

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpumask_copy(managed_policy->cpus, policy->cpus);
			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			pr_debug("CPU already managed, adding link\n");
			ret = sysfs_create_link(&sys_dev->kobj,
						&managed_policy->kobj,
						"cpufreq");
			if (ret)
				cpufreq_cpu_put(managed_policy);
			/*
			 * Success. We only needed to be added to the mask.
			 * Call driver->exit() because only the cpu parent of
			 * the kobj needed to call init().
			 */
			if (cpufreq_driver->exit)
				cpufreq_driver->exit(policy);

			if (!ret)
				return 1;
			else
				return ret;
		}
	}
#endif
	return ret;
}
760
761
/* symlink affected CPUs: create a "cpufreq" link from every other online
 * CPU in the policy back to the managing CPU's kobject.  Each link takes
 * a policy reference (dropped either here on error or at removal). */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct sys_device *cpu_sys_dev;

		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		/* reference held for the lifetime of the symlink */
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_sys_dev = get_cpu_sysdev(j);
		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
790
cf3289d0
AC
/*
 * Create the sysfs directory and attribute files for a new policy,
 * publish it in the per-CPU tables, symlink sibling CPUs, and apply the
 * initial policy.  On sysfs failure the kobject is put and the release
 * completion awaited before returning.
 */
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct sys_device *sys_dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &sys_dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device: first any driver-specific
	 * attributes, then the capability-dependent core ones */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	/* publish the policy for every online CPU it covers */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		if (!cpu_online(j))
			continue;
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
865
1da177e4
LT
866
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int ret = 0, found = 0;
	struct cpufreq_policy *policy;
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
	int sibling;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}
#endif

	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	ret = -ENOMEM;
	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;
	ret = (lock_policy_rwsem_write(cpu) < 0);
	WARN_ON(ret);

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* Set governor before ->init, so that driver could check it */
#ifdef CONFIG_HOTPLUG_CPU
	/* inherit the governor from a sibling that shares this policy */
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cp->governor &&
		    (cpumask_test_cpu(cpu, cp->related_cpus))) {
			policy->governor = cp->governor;
			found = 1;
			break;
		}
	}
#endif
	if (!found)
		policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_unlock_policy;
	}
	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
	if (ret) {
		if (ret > 0)
			/* This is a managed cpu, symlink created,
			   exit with 0 */
			ret = 0;
		goto err_unlock_policy;
	}

	ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
	if (ret)
		goto err_out_unregister;

	unlock_policy_rwsem_write(cpu);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	pr_debug("initialization complete\n");

	return 0;


	/* error unwind: each label undoes one successfully completed step,
	 * in reverse order of setup */
err_out_unregister:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_unlock_policy:
	unlock_policy_rwsem_write(cpu);
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	return ret;
}
1000
1001
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long flags;
	struct cpufreq_policy *data;
	/* kobj/cmp are snapshotted before the rwsem is dropped, because
	 * "data" may be freed by a racing teardown once we unlock. */
	struct kobject *kobj;
	struct completion *cmp;
#ifdef CONFIG_SMP
	struct sys_device *cpu_sys_dev;
	unsigned int j;
#endif

	pr_debug("unregistering CPU %u\n", cpu);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data) {
		/* Nothing registered for this CPU; still must release the
		 * rwsem the caller acquired for us. */
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		unlock_policy_rwsem_write(cpu);
		return -EINVAL;
	}
	per_cpu(cpufreq_cpu_data, cpu) = NULL;


#ifdef CONFIG_SMP
	/* if this isn't the CPU which is the parent of the kobj, we
	 * only need to unlink, put and exit
	 */
	if (unlikely(cpu != data->cpu)) {
		pr_debug("removing link\n");
		cpumask_clear_cpu(cpu, data->cpus);
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		/* snapshot kobj: sysfs_remove_link() may sleep, so it is
		 * called only after dropping the rwsem */
		kobj = &sys_dev->kobj;
		cpufreq_cpu_put(data);
		unlock_policy_rwsem_write(cpu);
		sysfs_remove_link(kobj, "cpufreq");
		return 0;
	}
#endif

#ifdef CONFIG_SMP

#ifdef CONFIG_HOTPLUG_CPU
	/* Remember the governor so a re-onlined CPU can restore it. */
	strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
			CPUFREQ_NAME_LEN);
#endif

	/* if we have other CPUs still registered, we need to unlink them,
	 * or else wait_for_completion below will lock up. Clean the
	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
	 * the sysfs links afterwards.
	 */
	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			per_cpu(cpufreq_cpu_data, j) = NULL;
		}
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* Second pass, outside the spinlock: sysfs operations can sleep. */
	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			pr_debug("removing link for cpu %u\n", j);
#ifdef CONFIG_HOTPLUG_CPU
			strncpy(per_cpu(cpufreq_cpu_governor, j),
				data->governor->name, CPUFREQ_NAME_LEN);
#endif
			cpu_sys_dev = get_cpu_sysdev(j);
			kobj = &cpu_sys_dev->kobj;
			/* drop/retake the rwsem around the sleeping sysfs call */
			unlock_policy_rwsem_write(cpu);
			sysfs_remove_link(kobj, "cpufreq");
			lock_policy_rwsem_write(cpu);
			cpufreq_cpu_put(data);
		}
	}
#else
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

	kobj = &data->kobj;
	cmp = &data->kobj_unregister;
	unlock_policy_rwsem_write(cpu);
	kobject_put(kobj);

	/* we need to make sure that the underlying kobj is actually
	 * not referenced anymore by anybody before we proceed with
	 * unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");

	lock_policy_rwsem_write(cpu);
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(data);
	unlock_policy_rwsem_write(cpu);

#ifdef CONFIG_HOTPLUG_CPU
	/* when the CPU which is the parent of the kobj is hotplugged
	 * offline, check for siblings, and create cpufreq sysfs interface
	 * and symlinks
	 */
	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		/* first sibling now owns the new sysfs dir */
		cpumask_clear_cpu(cpu, data->cpus);
		cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus)));

		/* finally remove our own symlink */
		lock_policy_rwsem_write(cpu);
		/* recursion hits the "cpu != data->cpu" unlink path above */
		__cpufreq_remove_dev(sys_dev);
	}
#endif

	free_cpumask_var(data->related_cpus);
	free_cpumask_var(data->cpus);
	kfree(data);

	return 0;
}
1136
1137
905d77cd 1138static int cpufreq_remove_dev(struct sys_device *sys_dev)
5a01f2e8
VP
1139{
1140 unsigned int cpu = sys_dev->id;
1141 int retval;
ec28297a
VP
1142
1143 if (cpu_is_offline(cpu))
1144 return 0;
1145
5a01f2e8
VP
1146 if (unlikely(lock_policy_rwsem_write(cpu)))
1147 BUG();
1148
1149 retval = __cpufreq_remove_dev(sys_dev);
1150 return retval;
1151}
1152
1153
65f27f38 1154static void handle_update(struct work_struct *work)
1da177e4 1155{
65f27f38
DH
1156 struct cpufreq_policy *policy =
1157 container_of(work, struct cpufreq_policy, update);
1158 unsigned int cpu = policy->cpu;
2d06d8c4 1159 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1160 cpufreq_update_policy(cpu);
1161}
1162
1163/**
1164 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1165 * @cpu: cpu number
1166 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1167 * @new_freq: CPU frequency the CPU actually runs at
1168 *
29464f28
DJ
1169 * We adjust to current frequency first, and need to clean up later.
1170 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1171 */
e08f5f5b
GS
1172static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1173 unsigned int new_freq)
1da177e4
LT
1174{
1175 struct cpufreq_freqs freqs;
1176
2d06d8c4 1177 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1178 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1179
1180 freqs.cpu = cpu;
1181 freqs.old = old_freq;
1182 freqs.new = new_freq;
1183 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1184 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1185}
1186
1187
32ee8c3e 1188/**
4ab70df4 1189 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1190 * @cpu: CPU number
1191 *
1192 * This is the last known freq, without actually getting it from the driver.
1193 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1194 */
1195unsigned int cpufreq_quick_get(unsigned int cpu)
1196{
1197 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
e08f5f5b 1198 unsigned int ret_freq = 0;
95235ca2
VP
1199
1200 if (policy) {
e08f5f5b 1201 ret_freq = policy->cur;
95235ca2
VP
1202 cpufreq_cpu_put(policy);
1203 }
1204
4d34a67d 1205 return ret_freq;
95235ca2
VP
1206}
1207EXPORT_SYMBOL(cpufreq_quick_get);
1208
1209
/*
 * Read the current frequency for @cpu from the driver. Caller must hold
 * the policy rwsem (read is enough). If the value reported by hardware
 * disagrees with policy->cur, resynchronize and schedule a policy update.
 */
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	/* Drivers without a ->get hook cannot report a frequency. */
	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	/* CPUFREQ_CONST_LOOPS drivers may legitimately differ; skip check. */
	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
1da177e4 1232
5a01f2e8
VP
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	/* lock failure leaves ret_freq at 0 but must still drop the ref */
	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
1260
e00e56df
RW
/* sysdev hooks: called when a CPU device is added to / removed from sysfs */
static struct sysdev_driver cpufreq_sysdev_driver = {
	.add		= cpufreq_add_dev,
	.remove		= cpufreq_remove_dev,
};
1265
1da177e4 1266
/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return 0;

	/* The driver's ->suspend hook is optional. */
	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
	}

	/* balance the cpufreq_cpu_get() above */
	cpufreq_cpu_put(cpu_policy);
	return ret;
}
1297
/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *	    restored. It will verify that the current freq is in sync with
 *	    what we believe it to be. This is a bit later than when it
 *	    should be, but nonethteless it's better than calling
 *	    cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU.  The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return;

	/* The driver's ->resume hook is optional. */
	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			/* skip the deferred re-sync, but still drop the ref */
			goto fail;
		}
	}

	/* defer the freq re-validation until interrupts are restored */
	schedule_work(&cpu_policy->update);

fail:
	cpufreq_cpu_put(cpu_policy);
}
1339
e00e56df
RW
/* syscore ops run on the boot CPU only, with other CPUs already offline */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1344
1345
1346/*********************************************************************
1347 * NOTIFIER LISTS INTERFACE *
1348 *********************************************************************/
1349
1350/**
1351 * cpufreq_register_notifier - register a driver with cpufreq
1352 * @nb: notifier function to register
1353 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1354 *
32ee8c3e 1355 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1356 * are notified about clock rate changes (once before and once after
1357 * the transition), or a list of drivers that are notified about
1358 * changes in cpufreq policy.
1359 *
1360 * This function may sleep, and has the same return conditions as
e041c683 1361 * blocking_notifier_chain_register.
1da177e4
LT
1362 */
1363int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1364{
1365 int ret;
1366
74212ca4
CEB
1367 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1368
1da177e4
LT
1369 switch (list) {
1370 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1371 ret = srcu_notifier_chain_register(
e041c683 1372 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1373 break;
1374 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1375 ret = blocking_notifier_chain_register(
1376 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1377 break;
1378 default:
1379 ret = -EINVAL;
1380 }
1da177e4
LT
1381
1382 return ret;
1383}
1384EXPORT_SYMBOL(cpufreq_register_notifier);
1385
1386
1387/**
1388 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1389 * @nb: notifier block to be unregistered
1390 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1391 *
1392 * Remove a driver from the CPU frequency notifier list.
1393 *
1394 * This function may sleep, and has the same return conditions as
e041c683 1395 * blocking_notifier_chain_unregister.
1da177e4
LT
1396 */
1397int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1398{
1399 int ret;
1400
1da177e4
LT
1401 switch (list) {
1402 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1403 ret = srcu_notifier_chain_unregister(
e041c683 1404 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1405 break;
1406 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1407 ret = blocking_notifier_chain_unregister(
1408 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1409 break;
1410 default:
1411 ret = -EINVAL;
1412 }
1da177e4
LT
1413
1414 return ret;
1415}
1416EXPORT_SYMBOL(cpufreq_unregister_notifier);
1417
1418
1419/*********************************************************************
1420 * GOVERNORS *
1421 *********************************************************************/
1422
1423
1424int __cpufreq_driver_target(struct cpufreq_policy *policy,
1425 unsigned int target_freq,
1426 unsigned int relation)
1427{
1428 int retval = -EINVAL;
c32b6b8e 1429
2d06d8c4 1430 pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1da177e4
LT
1431 target_freq, relation);
1432 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1433 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1434
1da177e4
LT
1435 return retval;
1436}
1437EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1438
1da177e4
LT
/*
 * Locked wrapper around __cpufreq_driver_target(): takes a reference on
 * the policy and the per-CPU rwsem before delegating.
 */
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	/* pin the policy so it cannot be freed under us */
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1462
bf0b90e3 1463int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62
VP
1464{
1465 int ret = 0;
1466
1467 policy = cpufreq_cpu_get(policy->cpu);
1468 if (!policy)
1469 return -EINVAL;
1470
bf0b90e3 1471 if (cpu_online(cpu) && cpufreq_driver->getavg)
1472 ret = cpufreq_driver->getavg(policy, cpu);
dfde5d62 1473
dfde5d62
VP
1474 cpufreq_cpu_put(policy);
1475 return ret;
1476}
5a01f2e8 1477EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1478
/*
 * Dispatch a governor event (START/STOP/LIMITS) to policy->governor,
 * falling back to the performance governor when the current governor
 * cannot cope with the hardware's transition latency.
 *
 * when "event" is CPUFREQ_GOV_LIMITS
 */

static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	/* hold the governor module while it may be running */
	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
1529
1530
1da177e4
LT
1531int cpufreq_register_governor(struct cpufreq_governor *governor)
1532{
3bcb09a3 1533 int err;
1da177e4
LT
1534
1535 if (!governor)
1536 return -EINVAL;
1537
3fc54d37 1538 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1539
3bcb09a3
JF
1540 err = -EBUSY;
1541 if (__find_governor(governor->name) == NULL) {
1542 err = 0;
1543 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1544 }
1da177e4 1545
32ee8c3e 1546 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1547 return err;
1da177e4
LT
1548}
1549EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1550
1551
/*
 * Remove @governor from the global list and scrub it from the saved
 * per-CPU governor names of offline CPUs, so a later online cannot
 * resurrect an unregistered governor.
 */
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		/* clear the remembered governor name for this offline CPU */
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1576
1577
1578
1579/*********************************************************************
1580 * POLICY INTERFACE *
1581 *********************************************************************/
1582
1583/**
1584 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1585 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1586 * is written
1da177e4
LT
1587 *
1588 * Reads the current cpufreq policy.
1589 */
1590int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1591{
1592 struct cpufreq_policy *cpu_policy;
1593 if (!policy)
1594 return -EINVAL;
1595
1596 cpu_policy = cpufreq_cpu_get(cpu);
1597 if (!cpu_policy)
1598 return -EINVAL;
1599
1da177e4 1600 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1601
1602 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1603 return 0;
1604}
1605EXPORT_SYMBOL(cpufreq_get_policy);
1606
1607
/*
 * data   : current policy.
 * policy : policy to be set.
 *
 * Validates the new limits through the driver and the policy notifier
 * chain, applies them, and switches governors if requested. Caller must
 * hold the policy rwsem for writing.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	/* reject ranges entirely outside the currently allowed window */
	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		/* setpolicy-style driver: hand the whole range over */
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
1695
1da177e4
LT
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	/* rebuild the policy from the user's saved preferences */
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
1750
/*
 * CPU hotplug notifier: create or tear down the cpufreq interface as
 * CPUs come online or are about to go down.
 */
static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	if (sys_dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			/* __cpufreq_remove_dev() needs the rwsem held
			 * and releases it itself */
			if (unlikely(lock_policy_rwsem_write(cpu)))
				BUG();

			__cpufreq_remove_dev(sys_dev);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			/* the down was aborted: re-register the interface */
			cpufreq_add_dev(sys_dev);
			break;
		}
	}
	return NOTIFY_OK;
}
1779
/* hotplug notifier block registered in cpufreq_register_driver() */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
    .notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
1783
1784/*********************************************************************
1785 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1786 *********************************************************************/
1787
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values#
 * submitted by the CPU Frequency driver.
 *
 *   Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	/* a driver needs ->verify, ->init and one of ->setpolicy/->target */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* only one cpufreq driver may be active at a time */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* registers us with sysdev; triggers cpufreq_add_dev() per CPU */
	ret = sysdev_driver_register(&cpu_sysdev_class,
					&cpufreq_sysdev_driver);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_sysdev_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_sysdev_unreg:
	sysdev_driver_unregister(&cpu_sysdev_class,
			&cpufreq_sysdev_driver);
err_null_driver:
	/* unwind: make the slot available to the next driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1858
1859
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 *    Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	/* only the driver that registered may unregister */
	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
1887
/*
 * Core initialisation: set up the per-CPU policy locks, create the
 * global /sys/devices/system/cpu/cpufreq kobject and register the
 * suspend/resume syscore hooks.
 */
static int __init cpufreq_core_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* -1 marks "no policy owner assigned yet" */
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
						&cpu_sysdev_class.kset.kobj);
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);
This page took 0.656201 seconds and 5 git commands to generate.