cpufreq: Make sure CPU is running on a freq from freq-table
1 /*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
7 *
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/syscore_ops.h>
30 #include <linux/tick.h>
31 #include <trace/events/power.h>
32
33 /**
34 * The "cpufreq driver" - the arch- or hardware-dependent low
35 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
37 */
38 static struct cpufreq_driver *cpufreq_driver;
39 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
40 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
41 static DEFINE_RWLOCK(cpufreq_driver_lock);
42 DEFINE_MUTEX(cpufreq_governor_lock);
43 static LIST_HEAD(cpufreq_policy_list);
44
45 #ifdef CONFIG_HOTPLUG_CPU
46 /* This one keeps track of the previously set governor of a removed CPU */
47 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
48 #endif
49
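/*
 * A driver scales frequencies itself ("has a target") if it implements
 * either ->target() or ->target_index(); otherwise it relies on
 * ->setpolicy() and the hardware picks frequencies within the limits.
 */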
50 static inline bool has_target(void)
51 {
52 return cpufreq_driver->target_index || cpufreq_driver->target;
53 }
54
55 /*
56 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
57 * sections
58 */
59 static DECLARE_RWSEM(cpufreq_rwsem);
60
61 /* internal prototypes */
62 static int __cpufreq_governor(struct cpufreq_policy *policy,
63 unsigned int event);
64 static unsigned int __cpufreq_get(unsigned int cpu);
65 static void handle_update(struct work_struct *work);
66
67 /**
68 * Two notifier lists: the "policy" list is involved in the
69 * validation process for a new CPU frequency policy; the
70 * "transition" list for kernel code that needs to handle
71 * changes to devices when the CPU clock speed changes.
72 * The mutex locks both lists.
73 */
74 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
75 static struct srcu_notifier_head cpufreq_transition_notifier_list;
76
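/*
 * SRCU notifier heads cannot be initialized statically, so the transition
 * list is set up from an early initcall; the flag below lets us warn if a
 * transition notifier is registered before that has happened.
 */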
77 static bool init_cpufreq_transition_notifier_list_called;
78 static int __init init_cpufreq_transition_notifier_list(void)
79 {
80 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
81 init_cpufreq_transition_notifier_list_called = true;
82 return 0;
83 }
84 pure_initcall(init_cpufreq_transition_notifier_list);
85
86 static int off __read_mostly;
87 static int cpufreq_disabled(void)
88 {
89 return off;
90 }
91 void disable_cpufreq(void)
92 {
93 off = 1;
94 }
95 static LIST_HEAD(cpufreq_governor_list);
96 static DEFINE_MUTEX(cpufreq_governor_mutex);
97
98 bool have_governor_per_policy(void)
99 {
100 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
101 }
102 EXPORT_SYMBOL_GPL(have_governor_per_policy);
103
104 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
105 {
106 if (have_governor_per_policy())
107 return &policy->kobj;
108 else
109 return cpufreq_global_kobject;
110 }
111 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
112
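/*
 * Jiffy-granularity fallback: any wall time not accounted as user, system,
 * irq, softirq, steal or nice time is treated as idle time.
 */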
113 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
114 {
115 u64 idle_time;
116 u64 cur_wall_time;
117 u64 busy_time;
118
119 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
120
121 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
122 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
123 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
124 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
125 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
127
128 idle_time = cur_wall_time - busy_time;
129 if (wall)
130 *wall = cputime_to_usecs(cur_wall_time);
131
132 return cputime_to_usecs(idle_time);
133 }
134
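/*
 * Prefer the precise NO_HZ idle accounting; if it is unavailable (-1ULL),
 * fall back to the jiffy-based estimate above. With io_busy set, iowait
 * counts as busy time and is therefore not added to the idle total.
 */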
135 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
136 {
137 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
138
139 if (idle_time == -1ULL)
140 return get_cpu_idle_time_jiffy(cpu, wall);
141 else if (!io_busy)
142 idle_time += get_cpu_iowait_time_us(cpu, wall);
143
144 return idle_time;
145 }
146 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
147
148 /*
149 * This is a generic cpufreq init() routine which can be used by cpufreq
150 * drivers of SMP systems. It will do the following:
151 * - validate & show the freq table passed
152 * - set the policy's transition latency
153 * - fill policy->cpus with all possible CPUs
154 */
155 int cpufreq_generic_init(struct cpufreq_policy *policy,
156 struct cpufreq_frequency_table *table,
157 unsigned int transition_latency)
158 {
159 int ret;
160
161 ret = cpufreq_table_validate_and_show(policy, table);
162 if (ret) {
163 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
164 return ret;
165 }
166
167 policy->cpuinfo.transition_latency = transition_latency;
168
169 /*
170 * The driver only supports the SMP configuration where all processors
171 * share the clock and voltage.
172 */
173 cpumask_setall(policy->cpus);
174
175 return 0;
176 }
177 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
178
179 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
180 {
181 struct cpufreq_policy *policy = NULL;
182 unsigned long flags;
183
184 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
185 return NULL;
186
187 if (!down_read_trylock(&cpufreq_rwsem))
188 return NULL;
189
190 /* get the cpufreq driver */
191 read_lock_irqsave(&cpufreq_driver_lock, flags);
192
193 if (cpufreq_driver) {
194 /* get the CPU */
195 policy = per_cpu(cpufreq_cpu_data, cpu);
196 if (policy)
197 kobject_get(&policy->kobj);
198 }
199
200 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
201
202 if (!policy)
203 up_read(&cpufreq_rwsem);
204
205 return policy;
206 }
207 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
208
209 void cpufreq_cpu_put(struct cpufreq_policy *policy)
210 {
211 if (cpufreq_disabled())
212 return;
213
214 kobject_put(&policy->kobj);
215 up_read(&cpufreq_rwsem);
216 }
217 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
218
219 /*********************************************************************
220 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
221 *********************************************************************/
222
223 /**
224 * adjust_jiffies - adjust the system "loops_per_jiffy"
225 *
226 * This function alters the system "loops_per_jiffy" for the clock
227 * speed change. Note that loops_per_jiffy cannot be updated on SMP
228 * systems as each CPU might be scaled differently. So, use the arch
229 * per-CPU loops_per_jiffy value wherever possible.
230 */
231 #ifndef CONFIG_SMP
232 static unsigned long l_p_j_ref;
233 static unsigned int l_p_j_ref_freq;
234
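/*
 * Example (hypothetical values): with l_p_j_ref = 4997120 saved at
 * l_p_j_ref_freq = 1000000 kHz, a transition to 500000 kHz rescales
 * loops_per_jiffy to 4997120 * 500000 / 1000000 = 2498560.
 */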
235 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
236 {
237 if (ci->flags & CPUFREQ_CONST_LOOPS)
238 return;
239
240 if (!l_p_j_ref_freq) {
241 l_p_j_ref = loops_per_jiffy;
242 l_p_j_ref_freq = ci->old;
243 pr_debug("saving %lu as reference value for loops_per_jiffy; "
244 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
245 }
246 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
247 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
248 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
249 ci->new);
250 pr_debug("scaling loops_per_jiffy to %lu "
251 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
252 }
253 }
254 #else
255 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
256 {
257 return;
258 }
259 #endif
260
261 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
262 struct cpufreq_freqs *freqs, unsigned int state)
263 {
264 BUG_ON(irqs_disabled());
265
266 if (cpufreq_disabled())
267 return;
268
269 freqs->flags = cpufreq_driver->flags;
270 pr_debug("notification %u of frequency transition to %u kHz\n",
271 state, freqs->new);
272
273 switch (state) {
274
275 case CPUFREQ_PRECHANGE:
276 /* detect if the driver reported a value as "old frequency"
277 * which is not equal to what the cpufreq core thinks is
278 * "old frequency".
279 */
280 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
281 if ((policy) && (policy->cpu == freqs->cpu) &&
282 (policy->cur) && (policy->cur != freqs->old)) {
283 pr_debug("Warning: CPU frequency is"
284 " %u, cpufreq assumed %u kHz.\n",
285 freqs->old, policy->cur);
286 freqs->old = policy->cur;
287 }
288 }
289 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
290 CPUFREQ_PRECHANGE, freqs);
291 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
292 break;
293
294 case CPUFREQ_POSTCHANGE:
295 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
296 pr_debug("FREQ: %lu - CPU: %lu\n", (unsigned long)freqs->new,
297 (unsigned long)freqs->cpu);
298 trace_cpu_frequency(freqs->new, freqs->cpu);
299 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
300 CPUFREQ_POSTCHANGE, freqs);
301 if (likely(policy) && likely(policy->cpu == freqs->cpu))
302 policy->cur = freqs->new;
303 break;
304 }
305 }
306
307 /**
308 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
309 * on frequency transition.
310 *
311 * This function calls the transition notifiers and the "adjust_jiffies"
312 * function. It is called twice on all CPU frequency changes that have
313 * external effects.
314 */
315 void cpufreq_notify_transition(struct cpufreq_policy *policy,
316 struct cpufreq_freqs *freqs, unsigned int state)
317 {
318 for_each_cpu(freqs->cpu, policy->cpus)
319 __cpufreq_notify_transition(policy, freqs, state);
320 }
321 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
322
323 /* Do post-transition notifications when the transition may have failed */
324 void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
325 struct cpufreq_freqs *freqs, int transition_failed)
326 {
327 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
328 if (!transition_failed)
329 return;
330
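/*
 * The transition failed: swap old and new and announce a reverse
 * transition back to the original frequency, so that notifier users
 * (e.g. cpufreq stats) see a consistent final state.
 */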
331 swap(freqs->old, freqs->new);
332 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
333 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
334 }
335 EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
336
337
338 /*********************************************************************
339 * SYSFS INTERFACE *
340 *********************************************************************/
341
342 static struct cpufreq_governor *__find_governor(const char *str_governor)
343 {
344 struct cpufreq_governor *t;
345
346 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
347 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
348 return t;
349
350 return NULL;
351 }
352
353 /**
354 * cpufreq_parse_governor - parse a governor string
355 */
356 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
357 struct cpufreq_governor **governor)
358 {
359 int err = -EINVAL;
360
361 if (!cpufreq_driver)
362 goto out;
363
364 if (cpufreq_driver->setpolicy) {
365 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
366 *policy = CPUFREQ_POLICY_PERFORMANCE;
367 err = 0;
368 } else if (!strnicmp(str_governor, "powersave",
369 CPUFREQ_NAME_LEN)) {
370 *policy = CPUFREQ_POLICY_POWERSAVE;
371 err = 0;
372 }
373 } else if (has_target()) {
374 struct cpufreq_governor *t;
375
376 mutex_lock(&cpufreq_governor_mutex);
377
378 t = __find_governor(str_governor);
379
380 if (t == NULL) {
381 int ret;
382
383 mutex_unlock(&cpufreq_governor_mutex);
384 ret = request_module("cpufreq_%s", str_governor);
385 mutex_lock(&cpufreq_governor_mutex);
386
387 if (ret == 0)
388 t = __find_governor(str_governor);
389 }
390
391 if (t != NULL) {
392 *governor = t;
393 err = 0;
394 }
395
396 mutex_unlock(&cpufreq_governor_mutex);
397 }
398 out:
399 return err;
400 }
401
402 /**
403 * cpufreq_per_cpu_attr_read() / show_##file_name() -
404 * print out cpufreq information
405 *
406 * Write out information from cpufreq_driver->policy[cpu]; object must be
407 * "unsigned int".
408 */
409
410 #define show_one(file_name, object) \
411 static ssize_t show_##file_name \
412 (struct cpufreq_policy *policy, char *buf) \
413 { \
414 return sprintf(buf, "%u\n", policy->object); \
415 }
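/*
 * For example, show_one(scaling_min_freq, min) below expands to a
 * show_scaling_min_freq() helper that prints policy->min; the store_one()
 * macro further down works analogously for the writable attributes.
 */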
416
417 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
418 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
419 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
420 show_one(scaling_min_freq, min);
421 show_one(scaling_max_freq, max);
422 show_one(scaling_cur_freq, cur);
423
424 static int cpufreq_set_policy(struct cpufreq_policy *policy,
425 struct cpufreq_policy *new_policy);
426
427 /**
428 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
429 */
430 #define store_one(file_name, object) \
431 static ssize_t store_##file_name \
432 (struct cpufreq_policy *policy, const char *buf, size_t count) \
433 { \
434 int ret; \
435 struct cpufreq_policy new_policy; \
436 \
437 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
438 if (ret) \
439 return -EINVAL; \
440 \
441 ret = sscanf(buf, "%u", &new_policy.object); \
442 if (ret != 1) \
443 return -EINVAL; \
444 \
445 ret = cpufreq_set_policy(policy, &new_policy); \
446 policy->user_policy.object = policy->object; \
447 \
448 return ret ? ret : count; \
449 }
450
451 store_one(scaling_min_freq, min);
452 store_one(scaling_max_freq, max);
453
454 /**
455 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
456 */
457 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
458 char *buf)
459 {
460 unsigned int cur_freq = __cpufreq_get(policy->cpu);
461 if (!cur_freq)
462 return sprintf(buf, "<unknown>\n");
463 return sprintf(buf, "%u\n", cur_freq);
464 }
465
466 /**
467 * show_scaling_governor - show the current policy for the specified CPU
468 */
469 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
470 {
471 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
472 return sprintf(buf, "powersave\n");
473 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
474 return sprintf(buf, "performance\n");
475 else if (policy->governor)
476 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
477 policy->governor->name);
478 return -EINVAL;
479 }
480
481 /**
482 * store_scaling_governor - store policy for the specified CPU
483 */
484 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
485 const char *buf, size_t count)
486 {
487 int ret;
488 char str_governor[16];
489 struct cpufreq_policy new_policy;
490
491 ret = cpufreq_get_policy(&new_policy, policy->cpu);
492 if (ret)
493 return ret;
494
495 ret = sscanf(buf, "%15s", str_governor);
496 if (ret != 1)
497 return -EINVAL;
498
499 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
500 &new_policy.governor))
501 return -EINVAL;
502
503 ret = cpufreq_set_policy(policy, &new_policy);
504
505 policy->user_policy.policy = policy->policy;
506 policy->user_policy.governor = policy->governor;
507
508 if (ret)
509 return ret;
510 else
511 return count;
512 }
513
514 /**
515 * show_scaling_driver - show the cpufreq driver currently loaded
516 */
517 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
518 {
519 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
520 }
521
522 /**
523 * show_scaling_available_governors - show the available CPUfreq governors
524 */
525 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
526 char *buf)
527 {
528 ssize_t i = 0;
529 struct cpufreq_governor *t;
530
531 if (!has_target()) {
532 i += sprintf(buf, "performance powersave");
533 goto out;
534 }
535
536 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
537 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
538 - (CPUFREQ_NAME_LEN + 2)))
539 goto out;
540 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
541 }
542 out:
543 i += sprintf(&buf[i], "\n");
544 return i;
545 }
546
547 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
548 {
549 ssize_t i = 0;
550 unsigned int cpu;
551
552 for_each_cpu(cpu, mask) {
553 if (i)
554 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
555 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
556 if (i >= (PAGE_SIZE - 5))
557 break;
558 }
559 i += sprintf(&buf[i], "\n");
560 return i;
561 }
562 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
563
564 /**
565 * show_related_cpus - show the CPUs affected by each transition even if
566 * hw coordination is in use
567 */
568 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
569 {
570 return cpufreq_show_cpus(policy->related_cpus, buf);
571 }
572
573 /**
574 * show_affected_cpus - show the CPUs affected by each transition
575 */
576 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
577 {
578 return cpufreq_show_cpus(policy->cpus, buf);
579 }
580
581 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
582 const char *buf, size_t count)
583 {
584 unsigned int freq = 0;
585 unsigned int ret;
586
587 if (!policy->governor || !policy->governor->store_setspeed)
588 return -EINVAL;
589
590 ret = sscanf(buf, "%u", &freq);
591 if (ret != 1)
592 return -EINVAL;
593
594 policy->governor->store_setspeed(policy, freq);
595
596 return count;
597 }
598
599 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
600 {
601 if (!policy->governor || !policy->governor->show_setspeed)
602 return sprintf(buf, "<unsupported>\n");
603
604 return policy->governor->show_setspeed(policy, buf);
605 }
606
607 /**
608 * show_bios_limit - show the current cpufreq HW/BIOS limitation
609 */
610 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
611 {
612 unsigned int limit;
613 int ret;
614 if (cpufreq_driver->bios_limit) {
615 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
616 if (!ret)
617 return sprintf(buf, "%u\n", limit);
618 }
619 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
620 }
621
622 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
623 cpufreq_freq_attr_ro(cpuinfo_min_freq);
624 cpufreq_freq_attr_ro(cpuinfo_max_freq);
625 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
626 cpufreq_freq_attr_ro(scaling_available_governors);
627 cpufreq_freq_attr_ro(scaling_driver);
628 cpufreq_freq_attr_ro(scaling_cur_freq);
629 cpufreq_freq_attr_ro(bios_limit);
630 cpufreq_freq_attr_ro(related_cpus);
631 cpufreq_freq_attr_ro(affected_cpus);
632 cpufreq_freq_attr_rw(scaling_min_freq);
633 cpufreq_freq_attr_rw(scaling_max_freq);
634 cpufreq_freq_attr_rw(scaling_governor);
635 cpufreq_freq_attr_rw(scaling_setspeed);
636
637 static struct attribute *default_attrs[] = {
638 &cpuinfo_min_freq.attr,
639 &cpuinfo_max_freq.attr,
640 &cpuinfo_transition_latency.attr,
641 &scaling_min_freq.attr,
642 &scaling_max_freq.attr,
643 &affected_cpus.attr,
644 &related_cpus.attr,
645 &scaling_governor.attr,
646 &scaling_driver.attr,
647 &scaling_available_governors.attr,
648 &scaling_setspeed.attr,
649 NULL
650 };
651
652 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
653 #define to_attr(a) container_of(a, struct freq_attr, attr)
654
655 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
656 {
657 struct cpufreq_policy *policy = to_policy(kobj);
658 struct freq_attr *fattr = to_attr(attr);
659 ssize_t ret;
660
661 if (!down_read_trylock(&cpufreq_rwsem))
662 return -EINVAL;
663
664 down_read(&policy->rwsem);
665
666 if (fattr->show)
667 ret = fattr->show(policy, buf);
668 else
669 ret = -EIO;
670
671 up_read(&policy->rwsem);
672 up_read(&cpufreq_rwsem);
673
674 return ret;
675 }
676
677 static ssize_t store(struct kobject *kobj, struct attribute *attr,
678 const char *buf, size_t count)
679 {
680 struct cpufreq_policy *policy = to_policy(kobj);
681 struct freq_attr *fattr = to_attr(attr);
682 ssize_t ret = -EINVAL;
683
684 get_online_cpus();
685
686 if (!cpu_online(policy->cpu))
687 goto unlock;
688
689 if (!down_read_trylock(&cpufreq_rwsem))
690 goto unlock;
691
692 down_write(&policy->rwsem);
693
694 if (fattr->store)
695 ret = fattr->store(policy, buf, count);
696 else
697 ret = -EIO;
698
699 up_write(&policy->rwsem);
700
701 up_read(&cpufreq_rwsem);
702 unlock:
703 put_online_cpus();
704
705 return ret;
706 }
707
708 static void cpufreq_sysfs_release(struct kobject *kobj)
709 {
710 struct cpufreq_policy *policy = to_policy(kobj);
711 pr_debug("last reference is dropped\n");
712 complete(&policy->kobj_unregister);
713 }
714
715 static const struct sysfs_ops sysfs_ops = {
716 .show = show,
717 .store = store,
718 };
719
720 static struct kobj_type ktype_cpufreq = {
721 .sysfs_ops = &sysfs_ops,
722 .default_attrs = default_attrs,
723 .release = cpufreq_sysfs_release,
724 };
725
726 struct kobject *cpufreq_global_kobject;
727 EXPORT_SYMBOL(cpufreq_global_kobject);
728
729 static int cpufreq_global_kobject_usage;
730
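/*
 * The global kobject is reference counted: the first get makes it appear
 * as /sys/devices/system/cpu/cpufreq, and the last put removes it again.
 */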
731 int cpufreq_get_global_kobject(void)
732 {
733 if (!cpufreq_global_kobject_usage++)
734 return kobject_add(cpufreq_global_kobject,
735 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
736
737 return 0;
738 }
739 EXPORT_SYMBOL(cpufreq_get_global_kobject);
740
741 void cpufreq_put_global_kobject(void)
742 {
743 if (!--cpufreq_global_kobject_usage)
744 kobject_del(cpufreq_global_kobject);
745 }
746 EXPORT_SYMBOL(cpufreq_put_global_kobject);
747
748 int cpufreq_sysfs_create_file(const struct attribute *attr)
749 {
750 int ret = cpufreq_get_global_kobject();
751
752 if (!ret) {
753 ret = sysfs_create_file(cpufreq_global_kobject, attr);
754 if (ret)
755 cpufreq_put_global_kobject();
756 }
757
758 return ret;
759 }
760 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
761
762 void cpufreq_sysfs_remove_file(const struct attribute *attr)
763 {
764 sysfs_remove_file(cpufreq_global_kobject, attr);
765 cpufreq_put_global_kobject();
766 }
767 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
768
769 /* symlink affected CPUs */
770 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
771 {
772 unsigned int j;
773 int ret = 0;
774
775 for_each_cpu(j, policy->cpus) {
776 struct device *cpu_dev;
777
778 if (j == policy->cpu)
779 continue;
780
781 pr_debug("Adding link for CPU: %u\n", j);
782 cpu_dev = get_cpu_device(j);
783 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
784 "cpufreq");
785 if (ret)
786 break;
787 }
788 return ret;
789 }
790
791 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
792 struct device *dev)
793 {
794 struct freq_attr **drv_attr;
795 int ret = 0;
796
797 /* prepare interface data */
798 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
799 &dev->kobj, "cpufreq");
800 if (ret)
801 return ret;
802
803 /* set up files for this cpu device */
804 drv_attr = cpufreq_driver->attr;
805 while ((drv_attr) && (*drv_attr)) {
806 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
807 if (ret)
808 goto err_out_kobj_put;
809 drv_attr++;
810 }
811 if (cpufreq_driver->get) {
812 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
813 if (ret)
814 goto err_out_kobj_put;
815 }
816 if (has_target()) {
817 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
818 if (ret)
819 goto err_out_kobj_put;
820 }
821 if (cpufreq_driver->bios_limit) {
822 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
823 if (ret)
824 goto err_out_kobj_put;
825 }
826
827 ret = cpufreq_add_dev_symlink(policy);
828 if (ret)
829 goto err_out_kobj_put;
830
831 return ret;
832
833 err_out_kobj_put:
834 kobject_put(&policy->kobj);
835 wait_for_completion(&policy->kobj_unregister);
836 return ret;
837 }
838
839 static void cpufreq_init_policy(struct cpufreq_policy *policy)
840 {
841 struct cpufreq_policy new_policy;
842 int ret = 0;
843
844 memcpy(&new_policy, policy, sizeof(*policy));
845
846 /* Use the default policy if it's valid. */
847 if (cpufreq_driver->setpolicy)
848 cpufreq_parse_governor(policy->governor->name,
849 &new_policy.policy, NULL);
850
851 /* ensure that the starting sequence is run in cpufreq_set_policy */
852 policy->governor = NULL;
853
854 /* set default policy */
855 ret = cpufreq_set_policy(policy, &new_policy);
856 if (ret) {
857 pr_debug("setting policy failed\n");
858 if (cpufreq_driver->exit)
859 cpufreq_driver->exit(policy);
860 }
861 }
862
863 #ifdef CONFIG_HOTPLUG_CPU
864 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
865 unsigned int cpu, struct device *dev)
866 {
867 int ret = 0;
868 unsigned long flags;
869
870 if (has_target()) {
871 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
872 if (ret) {
873 pr_err("%s: Failed to stop governor\n", __func__);
874 return ret;
875 }
876 }
877
878 down_write(&policy->rwsem);
879
880 write_lock_irqsave(&cpufreq_driver_lock, flags);
881
882 cpumask_set_cpu(cpu, policy->cpus);
883 per_cpu(cpufreq_cpu_data, cpu) = policy;
884 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
885
886 up_write(&policy->rwsem);
887
888 if (has_target()) {
889 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
890 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
891 pr_err("%s: Failed to start governor\n", __func__);
892 return ret;
893 }
894 }
895
896 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
897 }
898 #endif
899
900 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
901 {
902 struct cpufreq_policy *policy;
903 unsigned long flags;
904
905 read_lock_irqsave(&cpufreq_driver_lock, flags);
906
907 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
908
909 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
910
911 return policy;
912 }
913
914 static struct cpufreq_policy *cpufreq_policy_alloc(void)
915 {
916 struct cpufreq_policy *policy;
917
918 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
919 if (!policy)
920 return NULL;
921
922 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
923 goto err_free_policy;
924
925 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
926 goto err_free_cpumask;
927
928 INIT_LIST_HEAD(&policy->policy_list);
929 init_rwsem(&policy->rwsem);
930
931 return policy;
932
933 err_free_cpumask:
934 free_cpumask_var(policy->cpus);
935 err_free_policy:
936 kfree(policy);
937
938 return NULL;
939 }
940
941 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
942 {
943 struct kobject *kobj;
944 struct completion *cmp;
945
946 down_read(&policy->rwsem);
947 kobj = &policy->kobj;
948 cmp = &policy->kobj_unregister;
949 up_read(&policy->rwsem);
950 kobject_put(kobj);
951
952 /*
953 * We need to make sure that the underlying kobj is
954 * actually not referenced anymore by anybody before we
955 * proceed with unloading.
956 */
957 pr_debug("waiting for dropping of refcount\n");
958 wait_for_completion(cmp);
959 pr_debug("wait complete\n");
960 }
961
962 static void cpufreq_policy_free(struct cpufreq_policy *policy)
963 {
964 free_cpumask_var(policy->related_cpus);
965 free_cpumask_var(policy->cpus);
966 kfree(policy);
967 }
968
969 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
970 {
971 if (WARN_ON(cpu == policy->cpu))
972 return;
973
974 down_write(&policy->rwsem);
975
976 policy->last_cpu = policy->cpu;
977 policy->cpu = cpu;
978
979 up_write(&policy->rwsem);
980
981 cpufreq_frequency_table_update_policy_cpu(policy);
982 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
983 CPUFREQ_UPDATE_POLICY_CPU, policy);
984 }
985
986 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
987 bool frozen)
988 {
989 unsigned int j, cpu = dev->id;
990 int ret = -ENOMEM;
991 struct cpufreq_policy *policy;
992 unsigned long flags;
993 #ifdef CONFIG_HOTPLUG_CPU
994 struct cpufreq_policy *tpolicy;
995 struct cpufreq_governor *gov;
996 #endif
997
998 if (cpu_is_offline(cpu))
999 return 0;
1000
1001 pr_debug("adding CPU %u\n", cpu);
1002
1003 #ifdef CONFIG_SMP
1004 /* check whether a different CPU already registered this
1005 * CPU because it is in the same boat. */
1006 policy = cpufreq_cpu_get(cpu);
1007 if (unlikely(policy)) {
1008 cpufreq_cpu_put(policy);
1009 return 0;
1010 }
1011 #endif
1012
1013 if (!down_read_trylock(&cpufreq_rwsem))
1014 return 0;
1015
1016 #ifdef CONFIG_HOTPLUG_CPU
1017 /* Check if this cpu was hot-unplugged earlier and has siblings */
1018 read_lock_irqsave(&cpufreq_driver_lock, flags);
1019 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
1020 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
1021 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1022 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
1023 up_read(&cpufreq_rwsem);
1024 return ret;
1025 }
1026 }
1027 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1028 #endif
1029
1030 /*
1031 * Restore the saved policy when doing light-weight init and fall back
1032 * to the full init if that fails.
1033 */
1034 policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
1035 if (!policy) {
1036 frozen = false;
1037 policy = cpufreq_policy_alloc();
1038 if (!policy)
1039 goto nomem_out;
1040 }
1041
1042 /*
1043 * In the resume path, since we restore a saved policy, the assignment
1044 * to policy->cpu is like an update of the existing policy, rather than
1045 * the creation of a brand new one. So we need to perform this update
1046 * by invoking update_policy_cpu().
1047 */
1048 if (frozen && cpu != policy->cpu)
1049 update_policy_cpu(policy, cpu);
1050 else
1051 policy->cpu = cpu;
1052
1053 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1054 cpumask_copy(policy->cpus, cpumask_of(cpu));
1055
1056 init_completion(&policy->kobj_unregister);
1057 INIT_WORK(&policy->update, handle_update);
1058
1059 /* call driver. From then on the cpufreq driver must be able
1060 * to accept all calls to ->verify and ->setpolicy for this CPU
1061 */
1062 ret = cpufreq_driver->init(policy);
1063 if (ret) {
1064 pr_debug("initialization failed\n");
1065 goto err_set_policy_cpu;
1066 }
1067
1068 if (cpufreq_driver->get) {
1069 policy->cur = cpufreq_driver->get(policy->cpu);
1070 if (!policy->cur) {
1071 pr_err("%s: ->get() failed\n", __func__);
1072 goto err_get_freq;
1073 }
1074 }
1075
1076 /*
1077 * Sometimes boot loaders set the CPU frequency to a value outside of
1078 * the frequency table registered with the cpufreq core. In such cases
1079 * the CPU might be unstable if it has to run at that frequency for a
1080 * long duration, so it is better to set it to a frequency listed in the
1081 * freq-table. An out-of-table frequency also makes cpufreq stats
1082 * inconsistent, as cpufreq-stats would fail to register because the
1083 * current frequency of the CPU isn't found in the freq-table.
1084 *
1085 * Because we don't want this change to affect the boot process badly,
1086 * we go for the next freq which is >= policy->cur ('cur' must be set
1087 * by now, otherwise we would end up setting the freq to the lowest
1088 * entry of the table, as 'cur' is initialized to zero).
1089 *
1090 * We pass the target freq as "policy->cur - 1"; otherwise
1091 * __cpufreq_driver_target() would simply return early, as policy->cur
1092 * would be equal to the target freq.
1093 */
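/*
 * For example (hypothetical table): with available frequencies
 * {300000, 600000, 900000} kHz and a boot loader that left the CPU at
 * policy->cur = 475000 kHz, targeting 474999 kHz with
 * CPUFREQ_RELATION_L switches the CPU to the listed 600000 kHz.
 */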
1094 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1095 && has_target()) {
1096 /* Are we running at unknown frequency ? */
1097 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1098 if (ret == -EINVAL) {
1099 /* Warn user and fix it */
1100 pr_warn("%s: CPU%d: Running at unlisted freq: %u kHz\n",
1101 __func__, policy->cpu, policy->cur);
1102 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1103 CPUFREQ_RELATION_L);
1104
1105 /*
1106 * Having survived the first few seconds after boot at the "unknown"
1107 * frequency does not mean the system will remain stable at it for
1108 * longer. Hence, BUG_ON() if the switch fails.
1109 */
1110 BUG_ON(ret);
1111 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u kHz\n",
1112 __func__, policy->cpu, policy->cur);
1113 }
1114 }
1115
1116 /* related cpus should at least contain policy->cpus */
1117 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1118
1119 /*
1120 * affected cpus must always be the ones which are online. We aren't
1121 * managing offline cpus here.
1122 */
1123 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1124
1125 if (!frozen) {
1126 policy->user_policy.min = policy->min;
1127 policy->user_policy.max = policy->max;
1128 }
1129
1130 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1131 CPUFREQ_START, policy);
1132
1133 #ifdef CONFIG_HOTPLUG_CPU
1134 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1135 if (gov) {
1136 policy->governor = gov;
1137 pr_debug("Restoring governor %s for cpu %d\n",
1138 policy->governor->name, cpu);
1139 }
1140 #endif
1141
1142 write_lock_irqsave(&cpufreq_driver_lock, flags);
1143 for_each_cpu(j, policy->cpus)
1144 per_cpu(cpufreq_cpu_data, j) = policy;
1145 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1146
1147 if (!frozen) {
1148 ret = cpufreq_add_dev_interface(policy, dev);
1149 if (ret)
1150 goto err_out_unregister;
1151 }
1152
1153 write_lock_irqsave(&cpufreq_driver_lock, flags);
1154 list_add(&policy->policy_list, &cpufreq_policy_list);
1155 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1156
1157 cpufreq_init_policy(policy);
1158
1159 if (!frozen) {
1160 policy->user_policy.policy = policy->policy;
1161 policy->user_policy.governor = policy->governor;
1162 }
1163
1164 kobject_uevent(&policy->kobj, KOBJ_ADD);
1165 up_read(&cpufreq_rwsem);
1166
1167 pr_debug("initialization complete\n");
1168
1169 return 0;
1170
1171 err_out_unregister:
1172 write_lock_irqsave(&cpufreq_driver_lock, flags);
1173 for_each_cpu(j, policy->cpus)
1174 per_cpu(cpufreq_cpu_data, j) = NULL;
1175 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1176
1177 err_get_freq:
1178 if (cpufreq_driver->exit)
1179 cpufreq_driver->exit(policy);
1180 err_set_policy_cpu:
1181 if (frozen) {
1182 /* Do not leave stale fallback data behind. */
1183 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1184 cpufreq_policy_put_kobj(policy);
1185 }
1186 cpufreq_policy_free(policy);
1187
1188 nomem_out:
1189 up_read(&cpufreq_rwsem);
1190
1191 return ret;
1192 }
1193
1194 /**
1195 * cpufreq_add_dev - add a CPU device
1196 *
1197 * Adds the cpufreq interface for a CPU device.
1198 *
1199 * The Oracle says: try running cpufreq registration/unregistration concurrently
1200 * with cpu hotplugging and all hell will break loose. Tried to clean this
1201 * mess up, but more thorough testing is needed. - Mathieu
1202 */
1203 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1204 {
1205 return __cpufreq_add_dev(dev, sif, false);
1206 }
1207
1208 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1209 unsigned int old_cpu)
1210 {
1211 struct device *cpu_dev;
1212 int ret;
1213
1214 /* first sibling now owns the new sysfs dir */
1215 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
1216
1217 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1218 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1219 if (ret) {
1220 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1221
1222 down_write(&policy->rwsem);
1223 cpumask_set_cpu(old_cpu, policy->cpus);
1224 up_write(&policy->rwsem);
1225
1226 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1227 "cpufreq");
1228
1229 return -EINVAL;
1230 }
1231
1232 return cpu_dev->id;
1233 }
1234
1235 static int __cpufreq_remove_dev_prepare(struct device *dev,
1236 struct subsys_interface *sif,
1237 bool frozen)
1238 {
1239 unsigned int cpu = dev->id, cpus;
1240 int new_cpu, ret;
1241 unsigned long flags;
1242 struct cpufreq_policy *policy;
1243
1244 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1245
1246 write_lock_irqsave(&cpufreq_driver_lock, flags);
1247
1248 policy = per_cpu(cpufreq_cpu_data, cpu);
1249
1250 /* Save the policy somewhere when doing a light-weight tear-down */
1251 if (frozen)
1252 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1253
1254 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1255
1256 if (!policy) {
1257 pr_debug("%s: No cpu_data found\n", __func__);
1258 return -EINVAL;
1259 }
1260
1261 if (has_target()) {
1262 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1263 if (ret) {
1264 pr_err("%s: Failed to stop governor\n", __func__);
1265 return ret;
1266 }
1267 }
1268
1269 #ifdef CONFIG_HOTPLUG_CPU
1270 if (!cpufreq_driver->setpolicy)
1271 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1272 policy->governor->name, CPUFREQ_NAME_LEN);
1273 #endif
1274
1275 down_read(&policy->rwsem);
1276 cpus = cpumask_weight(policy->cpus);
1277 up_read(&policy->rwsem);
1278
1279 if (cpu != policy->cpu) {
1280 if (!frozen)
1281 sysfs_remove_link(&dev->kobj, "cpufreq");
1282 } else if (cpus > 1) {
1283 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
1284 if (new_cpu >= 0) {
1285 update_policy_cpu(policy, new_cpu);
1286
1287 if (!frozen) {
1288 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1289 __func__, new_cpu, cpu);
1290 }
1291 }
1292 }
1293
1294 return 0;
1295 }
1296
1297 static int __cpufreq_remove_dev_finish(struct device *dev,
1298 struct subsys_interface *sif,
1299 bool frozen)
1300 {
1301 unsigned int cpu = dev->id, cpus;
1302 int ret;
1303 unsigned long flags;
1304 struct cpufreq_policy *policy;
1305
1306 read_lock_irqsave(&cpufreq_driver_lock, flags);
1307 policy = per_cpu(cpufreq_cpu_data, cpu);
1308 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1309
1310 if (!policy) {
1311 pr_debug("%s: No cpu_data found\n", __func__);
1312 return -EINVAL;
1313 }
1314
1315 down_write(&policy->rwsem);
1316 cpus = cpumask_weight(policy->cpus);
1317
1318 if (cpus > 1)
1319 cpumask_clear_cpu(cpu, policy->cpus);
1320 up_write(&policy->rwsem);
1321
1322 /* If cpu is last user of policy, free policy */
1323 if (cpus == 1) {
1324 if (has_target()) {
1325 ret = __cpufreq_governor(policy,
1326 CPUFREQ_GOV_POLICY_EXIT);
1327 if (ret) {
1328 pr_err("%s: Failed to exit governor\n",
1329 __func__);
1330 return ret;
1331 }
1332 }
1333
1334 if (!frozen)
1335 cpufreq_policy_put_kobj(policy);
1336
1337 /*
1338 * Perform the ->exit() even during light-weight tear-down,
1339 * since this is a core component, and is essential for the
1340 * subsequent light-weight ->init() to succeed.
1341 */
1342 if (cpufreq_driver->exit)
1343 cpufreq_driver->exit(policy);
1344
1345 /* Remove policy from list of active policies */
1346 write_lock_irqsave(&cpufreq_driver_lock, flags);
1347 list_del(&policy->policy_list);
1348 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1349
1350 if (!frozen)
1351 cpufreq_policy_free(policy);
1352 } else {
1353 if (has_target()) {
1354 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
1355 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
1356 pr_err("%s: Failed to start governor\n",
1357 __func__);
1358 return ret;
1359 }
1360 }
1361 }
1362
1363 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1364 return 0;
1365 }
1366
1367 /**
1368 * cpufreq_remove_dev - remove a CPU device
1369 *
1370 * Removes the cpufreq interface for a CPU device.
1371 */
1372 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1373 {
1374 unsigned int cpu = dev->id;
1375 int ret;
1376
1377 if (cpu_is_offline(cpu))
1378 return 0;
1379
1380 ret = __cpufreq_remove_dev_prepare(dev, sif, false);
1381
1382 if (!ret)
1383 ret = __cpufreq_remove_dev_finish(dev, sif, false);
1384
1385 return ret;
1386 }
1387
1388 static void handle_update(struct work_struct *work)
1389 {
1390 struct cpufreq_policy *policy =
1391 container_of(work, struct cpufreq_policy, update);
1392 unsigned int cpu = policy->cpu;
1393 pr_debug("handle_update for cpu %u called\n", cpu);
1394 cpufreq_update_policy(cpu);
1395 }
1396
1397 /**
1398 * cpufreq_out_of_sync - If the actual and saved CPU frequency differ, we're
1399 * in deep trouble.
1400 * @cpu: cpu number
1401 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1402 * @new_freq: CPU frequency the CPU actually runs at
1403 *
1404 * We adjust to the current frequency first, and need to clean up later.
1405 * So either call cpufreq_update_policy() or schedule handle_update().
1406 */
1407 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1408 unsigned int new_freq)
1409 {
1410 struct cpufreq_policy *policy;
1411 struct cpufreq_freqs freqs;
1412 unsigned long flags;
1413
1414 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1415 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1416
1417 freqs.old = old_freq;
1418 freqs.new = new_freq;
1419
1420 read_lock_irqsave(&cpufreq_driver_lock, flags);
1421 policy = per_cpu(cpufreq_cpu_data, cpu);
1422 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1423
1424 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1425 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1426 }
1427
1428 /**
1429 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1430 * @cpu: CPU number
1431 *
1432 * This is the last known freq, without actually getting it from the driver.
1433 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1434 */
1435 unsigned int cpufreq_quick_get(unsigned int cpu)
1436 {
1437 struct cpufreq_policy *policy;
1438 unsigned int ret_freq = 0;
1439
1440 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1441 return cpufreq_driver->get(cpu);
1442
1443 policy = cpufreq_cpu_get(cpu);
1444 if (policy) {
1445 ret_freq = policy->cur;
1446 cpufreq_cpu_put(policy);
1447 }
1448
1449 return ret_freq;
1450 }
1451 EXPORT_SYMBOL(cpufreq_quick_get);
1452
1453 /**
1454 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1455 * @cpu: CPU number
1456 *
1457 * Just return the max possible frequency for a given CPU.
1458 */
1459 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1460 {
1461 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1462 unsigned int ret_freq = 0;
1463
1464 if (policy) {
1465 ret_freq = policy->max;
1466 cpufreq_cpu_put(policy);
1467 }
1468
1469 return ret_freq;
1470 }
1471 EXPORT_SYMBOL(cpufreq_quick_get_max);
1472
1473 static unsigned int __cpufreq_get(unsigned int cpu)
1474 {
1475 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1476 unsigned int ret_freq = 0;
1477
1478 if (!cpufreq_driver->get)
1479 return ret_freq;
1480
1481 ret_freq = cpufreq_driver->get(cpu);
1482
1483 if (ret_freq && policy->cur &&
1484 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1485 /* verify no discrepancy between actual and
1486 saved value exists */
1487 if (unlikely(ret_freq != policy->cur)) {
1488 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1489 schedule_work(&policy->update);
1490 }
1491 }
1492
1493 return ret_freq;
1494 }
1495
1496 /**
1497 * cpufreq_get - get the current CPU frequency (in kHz)
1498 * @cpu: CPU number
1499 *
1500 * Get the current frequency of the given CPU.
1501 */
1502 unsigned int cpufreq_get(unsigned int cpu)
1503 {
1504 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1505 unsigned int ret_freq = 0;
1506
1507 if (cpufreq_disabled() || !cpufreq_driver)
1508 return -ENOENT;
1509
1510 BUG_ON(!policy);
1511
1512 if (!down_read_trylock(&cpufreq_rwsem))
1513 return 0;
1514
1515 down_read(&policy->rwsem);
1516
1517 ret_freq = __cpufreq_get(cpu);
1518
1519 up_read(&policy->rwsem);
1520 up_read(&cpufreq_rwsem);
1521
1522 return ret_freq;
1523 }
1524 EXPORT_SYMBOL(cpufreq_get);
1525
1526 static struct subsys_interface cpufreq_interface = {
1527 .name = "cpufreq",
1528 .subsys = &cpu_subsys,
1529 .add_dev = cpufreq_add_dev,
1530 .remove_dev = cpufreq_remove_dev,
1531 };
1532
1533 /**
1534 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1535 *
1536 * This function is only executed for the boot processor. The other CPUs
1537 * have been put offline by means of CPU hotplug.
1538 */
1539 static int cpufreq_bp_suspend(void)
1540 {
1541 int ret = 0;
1542
1543 int cpu = smp_processor_id();
1544 struct cpufreq_policy *policy;
1545
1546 pr_debug("suspending cpu %u\n", cpu);
1547
1548 /* If there's no policy for the boot CPU, we have nothing to do. */
1549 policy = cpufreq_cpu_get(cpu);
1550 if (!policy)
1551 return 0;
1552
1553 if (cpufreq_driver->suspend) {
1554 ret = cpufreq_driver->suspend(policy);
1555 if (ret)
1556 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1557 "step on CPU %u\n", policy->cpu);
1558 }
1559
1560 cpufreq_cpu_put(policy);
1561 return ret;
1562 }
1563
1564 /**
1565 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1566 *
1567 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1568 * 2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are
1569 * restored. It will verify that the current freq is in sync with
1570 * what we believe it to be. This is a bit later than when it
1571 * should be, but nonetheless it's better than calling
1572 * cpufreq_driver->get() here, which might re-enable interrupts...
1573 *
1574 * This function is only executed for the boot CPU. The other CPUs have not
1575 * been turned on yet.
1576 */
1577 static void cpufreq_bp_resume(void)
1578 {
1579 int ret = 0;
1580
1581 int cpu = smp_processor_id();
1582 struct cpufreq_policy *policy;
1583
1584 pr_debug("resuming cpu %u\n", cpu);
1585
1586 /* If there's no policy for the boot CPU, we have nothing to do. */
1587 policy = cpufreq_cpu_get(cpu);
1588 if (!policy)
1589 return;
1590
1591 if (cpufreq_driver->resume) {
1592 ret = cpufreq_driver->resume(policy);
1593 if (ret) {
1594 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1595 "step on CPU %u\n", policy->cpu);
1596 goto fail;
1597 }
1598 }
1599
1600 schedule_work(&policy->update);
1601
1602 fail:
1603 cpufreq_cpu_put(policy);
1604 }
1605
1606 static struct syscore_ops cpufreq_syscore_ops = {
1607 .suspend = cpufreq_bp_suspend,
1608 .resume = cpufreq_bp_resume,
1609 };
1610
1611 /**
1612 * cpufreq_get_current_driver - return current driver's name
1613 *
1614 * Return the name string of the currently loaded cpufreq driver
1615 * or NULL, if none.
1616 */
1617 const char *cpufreq_get_current_driver(void)
1618 {
1619 if (cpufreq_driver)
1620 return cpufreq_driver->name;
1621
1622 return NULL;
1623 }
1624 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1625
1626 /*********************************************************************
1627 * NOTIFIER LISTS INTERFACE *
1628 *********************************************************************/
1629
1630 /**
1631 * cpufreq_register_notifier - register a driver with cpufreq
1632 * @nb: notifier function to register
1633 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1634 *
1635 * Add a driver to one of two lists: either a list of drivers that
1636 * are notified about clock rate changes (once before and once after
1637 * the transition), or a list of drivers that are notified about
1638 * changes in cpufreq policy.
1639 *
1640 * This function may sleep, and has the same return conditions as
1641 * blocking_notifier_chain_register.
1642 */
1643 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1644 {
1645 int ret;
1646
1647 if (cpufreq_disabled())
1648 return -EINVAL;
1649
1650 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1651
1652 switch (list) {
1653 case CPUFREQ_TRANSITION_NOTIFIER:
1654 ret = srcu_notifier_chain_register(
1655 &cpufreq_transition_notifier_list, nb);
1656 break;
1657 case CPUFREQ_POLICY_NOTIFIER:
1658 ret = blocking_notifier_chain_register(
1659 &cpufreq_policy_notifier_list, nb);
1660 break;
1661 default:
1662 ret = -EINVAL;
1663 }
1664
1665 return ret;
1666 }
1667 EXPORT_SYMBOL(cpufreq_register_notifier);
1668
1669 /**
1670 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1671 * @nb: notifier block to be unregistered
1672 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1673 *
1674 * Remove a driver from the CPU frequency notifier list.
1675 *
1676 * This function may sleep, and has the same return conditions as
1677 * blocking_notifier_chain_unregister.
1678 */
1679 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1680 {
1681 int ret;
1682
1683 if (cpufreq_disabled())
1684 return -EINVAL;
1685
1686 switch (list) {
1687 case CPUFREQ_TRANSITION_NOTIFIER:
1688 ret = srcu_notifier_chain_unregister(
1689 &cpufreq_transition_notifier_list, nb);
1690 break;
1691 case CPUFREQ_POLICY_NOTIFIER:
1692 ret = blocking_notifier_chain_unregister(
1693 &cpufreq_policy_notifier_list, nb);
1694 break;
1695 default:
1696 ret = -EINVAL;
1697 }
1698
1699 return ret;
1700 }
1701 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1702
1703
1704 /*********************************************************************
1705 * GOVERNORS *
1706 *********************************************************************/
1707
1708 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1709 unsigned int target_freq,
1710 unsigned int relation)
1711 {
1712 int retval = -EINVAL;
1713 unsigned int old_target_freq = target_freq;
1714
1715 if (cpufreq_disabled())
1716 return -ENODEV;
1717
1718 /* Make sure that target_freq is within supported range */
1719 if (target_freq > policy->max)
1720 target_freq = policy->max;
1721 if (target_freq < policy->min)
1722 target_freq = policy->min;
1723
1724 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1725 policy->cpu, target_freq, relation, old_target_freq);
1726
1727 /*
1728 * This might look like a redundant call as we are checking it again
1729 * after finding the index. But it is left in intentionally for cases
1730 * where exactly the same freq is requested again, so we can save a few
1731 * function calls.
1732 */
1733 if (target_freq == policy->cur)
1734 return 0;
1735
1736 if (cpufreq_driver->target)
1737 retval = cpufreq_driver->target(policy, target_freq, relation);
1738 else if (cpufreq_driver->target_index) {
1739 struct cpufreq_frequency_table *freq_table;
1740 struct cpufreq_freqs freqs;
1741 bool notify;
1742 int index;
1743
1744 freq_table = cpufreq_frequency_get_table(policy->cpu);
1745 if (unlikely(!freq_table)) {
1746 pr_err("%s: Unable to find freq_table\n", __func__);
1747 goto out;
1748 }
1749
1750 retval = cpufreq_frequency_table_target(policy, freq_table,
1751 target_freq, relation, &index);
1752 if (unlikely(retval)) {
1753 pr_err("%s: Unable to find matching freq\n", __func__);
1754 goto out;
1755 }
1756
1757 if (freq_table[index].frequency == policy->cur) {
1758 retval = 0;
1759 goto out;
1760 }
1761
1762 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
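/*
 * Drivers that set CPUFREQ_ASYNC_NOTIFICATION send the PRE/POSTCHANGE
 * notifications themselves, from outside this routine, typically once
 * the asynchronous frequency switch has actually completed.
 */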
1763
1764 if (notify) {
1765 freqs.old = policy->cur;
1766 freqs.new = freq_table[index].frequency;
1767 freqs.flags = 0;
1768
1769 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1770 __func__, policy->cpu, freqs.old,
1771 freqs.new);
1772
1773 cpufreq_notify_transition(policy, &freqs,
1774 CPUFREQ_PRECHANGE);
1775 }
1776
1777 retval = cpufreq_driver->target_index(policy, index);
1778 if (retval)
1779 pr_err("%s: Failed to change cpu frequency: %d\n",
1780 __func__, retval);
1781
1782 if (notify)
1783 cpufreq_notify_post_transition(policy, &freqs, retval);
1784 }
1785
1786 out:
1787 return retval;
1788 }
1789 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1790
1791 int cpufreq_driver_target(struct cpufreq_policy *policy,
1792 unsigned int target_freq,
1793 unsigned int relation)
1794 {
1795 int ret = -EINVAL;
1796
1797 down_write(&policy->rwsem);
1798
1799 ret = __cpufreq_driver_target(policy, target_freq, relation);
1800
1801 up_write(&policy->rwsem);
1802
1803 return ret;
1804 }
1805 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1806
1807 /*
1808 * Dispatch the given event (e.g. CPUFREQ_GOV_LIMITS) to the policy's governor.
1809 */
1810
1811 static int __cpufreq_governor(struct cpufreq_policy *policy,
1812 unsigned int event)
1813 {
1814 int ret;
1815
1816 /* The fallback governor only needs to be defined when the default
1817 governor is known to have latency restrictions, e.g. conservative
1818 or ondemand. That this is the case is already ensured in Kconfig.
1819 */
1820 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1821 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1822 #else
1823 struct cpufreq_governor *gov = NULL;
1824 #endif
1825
1826 if (policy->governor->max_transition_latency &&
1827 policy->cpuinfo.transition_latency >
1828 policy->governor->max_transition_latency) {
1829 if (!gov)
1830 return -EINVAL;
1831 else {
1832 printk(KERN_WARNING "%s governor failed, too long"
1833 " transition latency of HW, fallback"
1834 " to %s governor\n",
1835 policy->governor->name,
1836 gov->name);
1837 policy->governor = gov;
1838 }
1839 }
1840
1841 if (event == CPUFREQ_GOV_POLICY_INIT)
1842 if (!try_module_get(policy->governor->owner))
1843 return -EINVAL;
1844
1845 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1846 policy->cpu, event);
1847
1848 mutex_lock(&cpufreq_governor_lock);
1849 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
1850 || (!policy->governor_enabled
1851 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
1852 mutex_unlock(&cpufreq_governor_lock);
1853 return -EBUSY;
1854 }
1855
1856 if (event == CPUFREQ_GOV_STOP)
1857 policy->governor_enabled = false;
1858 else if (event == CPUFREQ_GOV_START)
1859 policy->governor_enabled = true;
1860
1861 mutex_unlock(&cpufreq_governor_lock);
1862
1863 ret = policy->governor->governor(policy, event);
1864
1865 if (!ret) {
1866 if (event == CPUFREQ_GOV_POLICY_INIT)
1867 policy->governor->initialized++;
1868 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1869 policy->governor->initialized--;
1870 } else {
1871 /* Restore original values */
1872 mutex_lock(&cpufreq_governor_lock);
1873 if (event == CPUFREQ_GOV_STOP)
1874 policy->governor_enabled = true;
1875 else if (event == CPUFREQ_GOV_START)
1876 policy->governor_enabled = false;
1877 mutex_unlock(&cpufreq_governor_lock);
1878 }
1879
1880 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1881 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1882 module_put(policy->governor->owner);
1883
1884 return ret;
1885 }
1886
1887 int cpufreq_register_governor(struct cpufreq_governor *governor)
1888 {
1889 int err;
1890
1891 if (!governor)
1892 return -EINVAL;
1893
1894 if (cpufreq_disabled())
1895 return -ENODEV;
1896
1897 mutex_lock(&cpufreq_governor_mutex);
1898
1899 governor->initialized = 0;
1900 err = -EBUSY;
1901 if (__find_governor(governor->name) == NULL) {
1902 err = 0;
1903 list_add(&governor->governor_list, &cpufreq_governor_list);
1904 }
1905
1906 mutex_unlock(&cpufreq_governor_mutex);
1907 return err;
1908 }
1909 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1910
1911 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1912 {
1913 #ifdef CONFIG_HOTPLUG_CPU
1914 int cpu;
1915 #endif
1916
1917 if (!governor)
1918 return;
1919
1920 if (cpufreq_disabled())
1921 return;
1922
1923 #ifdef CONFIG_HOTPLUG_CPU
1924 for_each_present_cpu(cpu) {
1925 if (cpu_online(cpu))
1926 continue;
1927 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1928 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1929 }
1930 #endif
1931
1932 mutex_lock(&cpufreq_governor_mutex);
1933 list_del(&governor->governor_list);
1934 mutex_unlock(&cpufreq_governor_mutex);
1935 return;
1936 }
1937 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1938
1939
1940 /*********************************************************************
1941 * POLICY INTERFACE *
1942 *********************************************************************/
1943
1944 /**
1945 * cpufreq_get_policy - get the current cpufreq_policy
1946 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1947 * is written
1948 *
1949 * Reads the current cpufreq policy.
1950 */
1951 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1952 {
1953 struct cpufreq_policy *cpu_policy;
1954 if (!policy)
1955 return -EINVAL;
1956
1957 cpu_policy = cpufreq_cpu_get(cpu);
1958 if (!cpu_policy)
1959 return -EINVAL;
1960
1961 memcpy(policy, cpu_policy, sizeof(*policy));
1962
1963 cpufreq_cpu_put(cpu_policy);
1964 return 0;
1965 }
1966 EXPORT_SYMBOL(cpufreq_get_policy);

/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_policy *new_policy)
{
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	if (new_policy->min > policy->max || new_policy->max < policy->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different from the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(new_policy);
	} else {
		if (new_policy->governor != policy->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = policy->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (policy->governor) {
				__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
				up_write(&policy->rwsem);
				__cpufreq_governor(policy,
						CPUFREQ_GOV_POLICY_EXIT);
				down_write(&policy->rwsem);
			}

			/* start new governor */
			policy->governor = new_policy->governor;
			if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					up_write(&policy->rwsem);
					__cpufreq_governor(policy,
							CPUFREQ_GOV_POLICY_EXIT);
					down_write(&policy->rwsem);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
					 policy->governor->name);
				if (old_gov) {
					policy->governor = old_gov;
					__cpufreq_governor(policy,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(policy,
							CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
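/*
 * Example (editorial sketch): the CPUFREQ_ADJUST call chain above is where
 * policy notifiers may tighten the limits before they are applied. A
 * hypothetical thermal cap could look like this (the "foo" names are
 * invented for illustration):
 *
 *	static int foo_policy_notifier(struct notifier_block *nb,
 *				       unsigned long event, void *data)
 *	{
 *		struct cpufreq_policy *policy = data;
 *
 *		if (event == CPUFREQ_ADJUST)
 *			cpufreq_verify_within_limits(policy, 0,
 *						     foo_thermal_max_khz);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_policy_notifier,
 *	};
 *
 *	cpufreq_register_notifier(&foo_nb, CPUFREQ_POLICY_NOTIFIER);
 */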

/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy) {
		ret = -ENODEV;
		goto no_policy;
	}

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;
	new_policy.policy = policy->user_policy.policy;
	new_policy.governor = policy->user_policy.governor;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq\n");
			policy->cur = new_policy.cur;
		} else {
			if (policy->cur != new_policy.cur && has_target())
				cpufreq_out_of_sync(cpu, policy->cur,
						    new_policy.cur);
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
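/*
 * Usage sketch (hypothetical caller): platform code that learns of a
 * firmware-imposed limit change, e.g. from an ACPI notification handler,
 * re-runs the policy through the notifier chain simply with:
 *
 *	cpufreq_update_policy(cpu);
 *
 * which re-applies the user limits, lets CPUFREQ_ADJUST notifiers
 * constrain them, and resyncs policy->cur if the hardware changed
 * frequency behind our back.
 */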

static int cpufreq_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;
	bool frozen = false;

	dev = get_cpu_device(cpu);
	if (dev) {
		if (action & CPU_TASKS_FROZEN)
			frozen = true;

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
			__cpufreq_add_dev(dev, NULL, frozen);
			cpufreq_update_policy(cpu);
			break;

		case CPU_DOWN_PREPARE:
			__cpufreq_remove_dev_prepare(dev, NULL, frozen);
			break;

		case CPU_POST_DEAD:
			__cpufreq_remove_dev_finish(dev, NULL, frozen);
			break;

		case CPU_DOWN_FAILED:
			__cpufreq_add_dev(dev, NULL, frozen);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
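/*
 * Editorial note: the _FROZEN variants of the hotplug actions are masked
 * off above, so suspend/resume follows the same add/remove paths as
 * runtime hotplug, with "frozen" telling the lower layers to restore the
 * saved policy instead of recreating sysfs state. Teardown is split
 * across CPU_DOWN_PREPARE and CPU_POST_DEAD so the final cleanup runs
 * only once the CPU is completely offline.
 */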

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 *	submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
	      driver_data->target))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
				 driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
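/*
 * Example (editorial sketch, hypothetical "foo" driver): a minimal
 * frequency-table based driver satisfies the ->verify/->init/->target*
 * requirements checked above with the generic table helpers (assuming a
 * kernel that provides cpufreq_generic_init()) plus one ->target_index
 * hook; foo_set_rate() stands in for the platform's clock programming:
 *
 *	static struct cpufreq_frequency_table foo_freqs[] = {
 *		{ .frequency = 396000 },
 *		{ .frequency = 792000 },
 *		{ .frequency = CPUFREQ_TABLE_END },
 *	};
 *
 *	static int foo_target_index(struct cpufreq_policy *policy,
 *				    unsigned int index)
 *	{
 *		return foo_set_rate(foo_freqs[index].frequency);
 *	}
 *
 *	static int foo_cpu_init(struct cpufreq_policy *policy)
 *	{
 *		return cpufreq_generic_init(policy, foo_freqs, 100000);
 *	}
 *
 *	static struct cpufreq_driver foo_driver = {
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= foo_target_index,
 *		.init		= foo_cpu_init,
 *		.name		= "foo",
 *	};
 *
 *	ret = cpufreq_register_driver(&foo_driver);
 *
 * and, symmetrically, cpufreq_unregister_driver(&foo_driver) on module
 * exit.
 */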

/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);