Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64...
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
1 /*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
7 *
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/tick.h>
32 #include <trace/events/power.h>
33
34 static LIST_HEAD(cpufreq_policy_list);
35
36 static inline bool policy_is_inactive(struct cpufreq_policy *policy)
37 {
38 return cpumask_empty(policy->cpus);
39 }
40
41 /* Macros to iterate over CPU policies */
42 #define for_each_suitable_policy(__policy, __active) \
43 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
44 if ((__active) == !policy_is_inactive(__policy))
45
46 #define for_each_active_policy(__policy) \
47 for_each_suitable_policy(__policy, true)
48 #define for_each_inactive_policy(__policy) \
49 for_each_suitable_policy(__policy, false)
50
51 #define for_each_policy(__policy) \
52 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
53
54 /* Iterate over governors */
55 static LIST_HEAD(cpufreq_governor_list);
56 #define for_each_governor(__governor) \
57 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
58
59 /**
60 * The "cpufreq driver" - the arch- or hardware-dependent low
61 * level driver of CPUFreq support, and its spinlock. This lock
62 * also protects the cpufreq_cpu_data array.
63 */
64 static struct cpufreq_driver *cpufreq_driver;
65 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
66 static DEFINE_RWLOCK(cpufreq_driver_lock);
67
68 /* Flag to suspend/resume CPUFreq governors */
69 static bool cpufreq_suspended;
70
71 static inline bool has_target(void)
72 {
73 return cpufreq_driver->target_index || cpufreq_driver->target;
74 }
75
76 /* internal prototypes */
77 static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
78 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
79
80 /**
81 * Two notifier lists: the "policy" list is involved in the
82 * validation process for a new CPU frequency policy; the
83 * "transition" list for kernel code that needs to handle
84 * changes to devices when the CPU clock speed changes.
85 * The mutex locks both lists.
86 */
87 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
88 static struct srcu_notifier_head cpufreq_transition_notifier_list;
89
90 static bool init_cpufreq_transition_notifier_list_called;
91 static int __init init_cpufreq_transition_notifier_list(void)
92 {
93 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
94 init_cpufreq_transition_notifier_list_called = true;
95 return 0;
96 }
97 pure_initcall(init_cpufreq_transition_notifier_list);
98
99 static int off __read_mostly;
100 static int cpufreq_disabled(void)
101 {
102 return off;
103 }
104 void disable_cpufreq(void)
105 {
106 off = 1;
107 }
108 static DEFINE_MUTEX(cpufreq_governor_mutex);
109
110 bool have_governor_per_policy(void)
111 {
112 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
113 }
114 EXPORT_SYMBOL_GPL(have_governor_per_policy);
115
116 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
117 {
118 if (have_governor_per_policy())
119 return &policy->kobj;
120 else
121 return cpufreq_global_kobject;
122 }
123 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
124
125 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
126 {
127 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
128
129 return policy && !policy_is_inactive(policy) ?
130 policy->freq_table : NULL;
131 }
132 EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
133
134 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
135 {
136 u64 idle_time;
137 u64 cur_wall_time;
138 u64 busy_time;
139
140 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
141
142 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
143 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
144 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
145 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
146 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
147 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
148
149 idle_time = cur_wall_time - busy_time;
150 if (wall)
151 *wall = cputime_to_usecs(cur_wall_time);
152
153 return cputime_to_usecs(idle_time);
154 }
155
156 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
157 {
158 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
159
160 if (idle_time == -1ULL)
161 return get_cpu_idle_time_jiffy(cpu, wall);
162 else if (!io_busy)
163 idle_time += get_cpu_iowait_time_us(cpu, wall);
164
165 return idle_time;
166 }
167 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
168
169 /*
170 * This is a generic cpufreq init() routine which can be used by cpufreq
171 * drivers of SMP systems. It will do following:
172 * - validate & show freq table passed
173 * - set policies transition latency
174 * - policy->cpus with all possible CPUs
175 */
176 int cpufreq_generic_init(struct cpufreq_policy *policy,
177 struct cpufreq_frequency_table *table,
178 unsigned int transition_latency)
179 {
180 int ret;
181
182 ret = cpufreq_table_validate_and_show(policy, table);
183 if (ret) {
184 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
185 return ret;
186 }
187
188 policy->cpuinfo.transition_latency = transition_latency;
189
190 /*
191 * The driver only supports the SMP configuration where all processors
192 * share the clock and voltage and clock.
193 */
194 cpumask_setall(policy->cpus);
195
196 return 0;
197 }
198 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
199
200 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
201 {
202 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
203
204 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
205 }
206 EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
207
208 unsigned int cpufreq_generic_get(unsigned int cpu)
209 {
210 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
211
212 if (!policy || IS_ERR(policy->clk)) {
213 pr_err("%s: No %s associated to cpu: %d\n",
214 __func__, policy ? "clk" : "policy", cpu);
215 return 0;
216 }
217
218 return clk_get_rate(policy->clk) / 1000;
219 }
220 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
221
222 /**
223 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
224 *
225 * @cpu: cpu to find policy for.
226 *
227 * This returns policy for 'cpu', returns NULL if it doesn't exist.
228 * It also increments the kobject reference count to mark it busy and so would
229 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
230 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
231 * freed as that depends on the kobj count.
232 *
233 * Return: A valid policy on success, otherwise NULL on failure.
234 */
235 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
236 {
237 struct cpufreq_policy *policy = NULL;
238 unsigned long flags;
239
240 if (WARN_ON(cpu >= nr_cpu_ids))
241 return NULL;
242
243 /* get the cpufreq driver */
244 read_lock_irqsave(&cpufreq_driver_lock, flags);
245
246 if (cpufreq_driver) {
247 /* get the CPU */
248 policy = cpufreq_cpu_get_raw(cpu);
249 if (policy)
250 kobject_get(&policy->kobj);
251 }
252
253 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
254
255 return policy;
256 }
257 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
258
259 /**
260 * cpufreq_cpu_put: Decrements the usage count of a policy
261 *
262 * @policy: policy earlier returned by cpufreq_cpu_get().
263 *
264 * This decrements the kobject reference count incremented earlier by calling
265 * cpufreq_cpu_get().
266 */
267 void cpufreq_cpu_put(struct cpufreq_policy *policy)
268 {
269 kobject_put(&policy->kobj);
270 }
271 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
272
273 /*********************************************************************
274 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
275 *********************************************************************/
276
277 /**
278 * adjust_jiffies - adjust the system "loops_per_jiffy"
279 *
280 * This function alters the system "loops_per_jiffy" for the clock
281 * speed change. Note that loops_per_jiffy cannot be updated on SMP
282 * systems as each CPU might be scaled differently. So, use the arch
283 * per-CPU loops_per_jiffy value wherever possible.
284 */
285 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
286 {
287 #ifndef CONFIG_SMP
288 static unsigned long l_p_j_ref;
289 static unsigned int l_p_j_ref_freq;
290
291 if (ci->flags & CPUFREQ_CONST_LOOPS)
292 return;
293
294 if (!l_p_j_ref_freq) {
295 l_p_j_ref = loops_per_jiffy;
296 l_p_j_ref_freq = ci->old;
297 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
298 l_p_j_ref, l_p_j_ref_freq);
299 }
300 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
301 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
302 ci->new);
303 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
304 loops_per_jiffy, ci->new);
305 }
306 #endif
307 }
308
309 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
310 struct cpufreq_freqs *freqs, unsigned int state)
311 {
312 BUG_ON(irqs_disabled());
313
314 if (cpufreq_disabled())
315 return;
316
317 freqs->flags = cpufreq_driver->flags;
318 pr_debug("notification %u of frequency transition to %u kHz\n",
319 state, freqs->new);
320
321 switch (state) {
322
323 case CPUFREQ_PRECHANGE:
324 /* detect if the driver reported a value as "old frequency"
325 * which is not equal to what the cpufreq core thinks is
326 * "old frequency".
327 */
328 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
329 if ((policy) && (policy->cpu == freqs->cpu) &&
330 (policy->cur) && (policy->cur != freqs->old)) {
331 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
332 freqs->old, policy->cur);
333 freqs->old = policy->cur;
334 }
335 }
336 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
337 CPUFREQ_PRECHANGE, freqs);
338 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
339 break;
340
341 case CPUFREQ_POSTCHANGE:
342 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
343 pr_debug("FREQ: %lu - CPU: %lu\n",
344 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
345 trace_cpu_frequency(freqs->new, freqs->cpu);
346 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
347 CPUFREQ_POSTCHANGE, freqs);
348 if (likely(policy) && likely(policy->cpu == freqs->cpu))
349 policy->cur = freqs->new;
350 break;
351 }
352 }
353
354 /**
355 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
356 * on frequency transition.
357 *
358 * This function calls the transition notifiers and the "adjust_jiffies"
359 * function. It is called twice on all CPU frequency changes that have
360 * external effects.
361 */
362 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
363 struct cpufreq_freqs *freqs, unsigned int state)
364 {
365 for_each_cpu(freqs->cpu, policy->cpus)
366 __cpufreq_notify_transition(policy, freqs, state);
367 }
368
369 /* Do post notifications when there are chances that transition has failed */
370 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
371 struct cpufreq_freqs *freqs, int transition_failed)
372 {
373 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
374 if (!transition_failed)
375 return;
376
377 swap(freqs->old, freqs->new);
378 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
379 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
380 }
381
382 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
383 struct cpufreq_freqs *freqs)
384 {
385
386 /*
387 * Catch double invocations of _begin() which lead to self-deadlock.
388 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
389 * doesn't invoke _begin() on their behalf, and hence the chances of
390 * double invocations are very low. Moreover, there are scenarios
391 * where these checks can emit false-positive warnings in these
392 * drivers; so we avoid that by skipping them altogether.
393 */
394 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
395 && current == policy->transition_task);
396
397 wait:
398 wait_event(policy->transition_wait, !policy->transition_ongoing);
399
400 spin_lock(&policy->transition_lock);
401
402 if (unlikely(policy->transition_ongoing)) {
403 spin_unlock(&policy->transition_lock);
404 goto wait;
405 }
406
407 policy->transition_ongoing = true;
408 policy->transition_task = current;
409
410 spin_unlock(&policy->transition_lock);
411
412 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
413 }
414 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
415
416 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
417 struct cpufreq_freqs *freqs, int transition_failed)
418 {
419 if (unlikely(WARN_ON(!policy->transition_ongoing)))
420 return;
421
422 cpufreq_notify_post_transition(policy, freqs, transition_failed);
423
424 policy->transition_ongoing = false;
425 policy->transition_task = NULL;
426
427 wake_up(&policy->transition_wait);
428 }
429 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
430
431
432 /*********************************************************************
433 * SYSFS INTERFACE *
434 *********************************************************************/
435 static ssize_t show_boost(struct kobject *kobj,
436 struct attribute *attr, char *buf)
437 {
438 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
439 }
440
441 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
442 const char *buf, size_t count)
443 {
444 int ret, enable;
445
446 ret = sscanf(buf, "%d", &enable);
447 if (ret != 1 || enable < 0 || enable > 1)
448 return -EINVAL;
449
450 if (cpufreq_boost_trigger_state(enable)) {
451 pr_err("%s: Cannot %s BOOST!\n",
452 __func__, enable ? "enable" : "disable");
453 return -EINVAL;
454 }
455
456 pr_debug("%s: cpufreq BOOST %s\n",
457 __func__, enable ? "enabled" : "disabled");
458
459 return count;
460 }
461 define_one_global_rw(boost);
462
463 static struct cpufreq_governor *find_governor(const char *str_governor)
464 {
465 struct cpufreq_governor *t;
466
467 for_each_governor(t)
468 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
469 return t;
470
471 return NULL;
472 }
473
474 /**
475 * cpufreq_parse_governor - parse a governor string
476 */
477 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
478 struct cpufreq_governor **governor)
479 {
480 int err = -EINVAL;
481
482 if (cpufreq_driver->setpolicy) {
483 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
484 *policy = CPUFREQ_POLICY_PERFORMANCE;
485 err = 0;
486 } else if (!strncasecmp(str_governor, "powersave",
487 CPUFREQ_NAME_LEN)) {
488 *policy = CPUFREQ_POLICY_POWERSAVE;
489 err = 0;
490 }
491 } else {
492 struct cpufreq_governor *t;
493
494 mutex_lock(&cpufreq_governor_mutex);
495
496 t = find_governor(str_governor);
497
498 if (t == NULL) {
499 int ret;
500
501 mutex_unlock(&cpufreq_governor_mutex);
502 ret = request_module("cpufreq_%s", str_governor);
503 mutex_lock(&cpufreq_governor_mutex);
504
505 if (ret == 0)
506 t = find_governor(str_governor);
507 }
508
509 if (t != NULL) {
510 *governor = t;
511 err = 0;
512 }
513
514 mutex_unlock(&cpufreq_governor_mutex);
515 }
516 return err;
517 }
518
519 /**
520 * cpufreq_per_cpu_attr_read() / show_##file_name() -
521 * print out cpufreq information
522 *
523 * Write out information from cpufreq_driver->policy[cpu]; object must be
524 * "unsigned int".
525 */
526
527 #define show_one(file_name, object) \
528 static ssize_t show_##file_name \
529 (struct cpufreq_policy *policy, char *buf) \
530 { \
531 return sprintf(buf, "%u\n", policy->object); \
532 }
533
534 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
535 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
536 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
537 show_one(scaling_min_freq, min);
538 show_one(scaling_max_freq, max);
539
540 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
541 {
542 ssize_t ret;
543
544 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
545 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
546 else
547 ret = sprintf(buf, "%u\n", policy->cur);
548 return ret;
549 }
550
551 static int cpufreq_set_policy(struct cpufreq_policy *policy,
552 struct cpufreq_policy *new_policy);
553
554 /**
555 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
556 */
557 #define store_one(file_name, object) \
558 static ssize_t store_##file_name \
559 (struct cpufreq_policy *policy, const char *buf, size_t count) \
560 { \
561 int ret, temp; \
562 struct cpufreq_policy new_policy; \
563 \
564 memcpy(&new_policy, policy, sizeof(*policy)); \
565 \
566 ret = sscanf(buf, "%u", &new_policy.object); \
567 if (ret != 1) \
568 return -EINVAL; \
569 \
570 temp = new_policy.object; \
571 ret = cpufreq_set_policy(policy, &new_policy); \
572 if (!ret) \
573 policy->user_policy.object = temp; \
574 \
575 return ret ? ret : count; \
576 }
577
578 store_one(scaling_min_freq, min);
579 store_one(scaling_max_freq, max);
580
581 /**
582 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
583 */
584 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
585 char *buf)
586 {
587 unsigned int cur_freq = __cpufreq_get(policy);
588 if (!cur_freq)
589 return sprintf(buf, "<unknown>");
590 return sprintf(buf, "%u\n", cur_freq);
591 }
592
593 /**
594 * show_scaling_governor - show the current policy for the specified CPU
595 */
596 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
597 {
598 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
599 return sprintf(buf, "powersave\n");
600 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
601 return sprintf(buf, "performance\n");
602 else if (policy->governor)
603 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
604 policy->governor->name);
605 return -EINVAL;
606 }
607
608 /**
609 * store_scaling_governor - store policy for the specified CPU
610 */
611 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
612 const char *buf, size_t count)
613 {
614 int ret;
615 char str_governor[16];
616 struct cpufreq_policy new_policy;
617
618 memcpy(&new_policy, policy, sizeof(*policy));
619
620 ret = sscanf(buf, "%15s", str_governor);
621 if (ret != 1)
622 return -EINVAL;
623
624 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
625 &new_policy.governor))
626 return -EINVAL;
627
628 ret = cpufreq_set_policy(policy, &new_policy);
629 return ret ? ret : count;
630 }
631
632 /**
633 * show_scaling_driver - show the cpufreq driver currently loaded
634 */
635 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
636 {
637 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
638 }
639
640 /**
641 * show_scaling_available_governors - show the available CPUfreq governors
642 */
643 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
644 char *buf)
645 {
646 ssize_t i = 0;
647 struct cpufreq_governor *t;
648
649 if (!has_target()) {
650 i += sprintf(buf, "performance powersave");
651 goto out;
652 }
653
654 for_each_governor(t) {
655 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
656 - (CPUFREQ_NAME_LEN + 2)))
657 goto out;
658 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
659 }
660 out:
661 i += sprintf(&buf[i], "\n");
662 return i;
663 }
664
665 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
666 {
667 ssize_t i = 0;
668 unsigned int cpu;
669
670 for_each_cpu(cpu, mask) {
671 if (i)
672 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
673 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
674 if (i >= (PAGE_SIZE - 5))
675 break;
676 }
677 i += sprintf(&buf[i], "\n");
678 return i;
679 }
680 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
681
682 /**
683 * show_related_cpus - show the CPUs affected by each transition even if
684 * hw coordination is in use
685 */
686 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
687 {
688 return cpufreq_show_cpus(policy->related_cpus, buf);
689 }
690
691 /**
692 * show_affected_cpus - show the CPUs affected by each transition
693 */
694 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
695 {
696 return cpufreq_show_cpus(policy->cpus, buf);
697 }
698
699 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
700 const char *buf, size_t count)
701 {
702 unsigned int freq = 0;
703 unsigned int ret;
704
705 if (!policy->governor || !policy->governor->store_setspeed)
706 return -EINVAL;
707
708 ret = sscanf(buf, "%u", &freq);
709 if (ret != 1)
710 return -EINVAL;
711
712 policy->governor->store_setspeed(policy, freq);
713
714 return count;
715 }
716
717 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
718 {
719 if (!policy->governor || !policy->governor->show_setspeed)
720 return sprintf(buf, "<unsupported>\n");
721
722 return policy->governor->show_setspeed(policy, buf);
723 }
724
725 /**
726 * show_bios_limit - show the current cpufreq HW/BIOS limitation
727 */
728 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
729 {
730 unsigned int limit;
731 int ret;
732 if (cpufreq_driver->bios_limit) {
733 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
734 if (!ret)
735 return sprintf(buf, "%u\n", limit);
736 }
737 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
738 }
739
740 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
741 cpufreq_freq_attr_ro(cpuinfo_min_freq);
742 cpufreq_freq_attr_ro(cpuinfo_max_freq);
743 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
744 cpufreq_freq_attr_ro(scaling_available_governors);
745 cpufreq_freq_attr_ro(scaling_driver);
746 cpufreq_freq_attr_ro(scaling_cur_freq);
747 cpufreq_freq_attr_ro(bios_limit);
748 cpufreq_freq_attr_ro(related_cpus);
749 cpufreq_freq_attr_ro(affected_cpus);
750 cpufreq_freq_attr_rw(scaling_min_freq);
751 cpufreq_freq_attr_rw(scaling_max_freq);
752 cpufreq_freq_attr_rw(scaling_governor);
753 cpufreq_freq_attr_rw(scaling_setspeed);
754
755 static struct attribute *default_attrs[] = {
756 &cpuinfo_min_freq.attr,
757 &cpuinfo_max_freq.attr,
758 &cpuinfo_transition_latency.attr,
759 &scaling_min_freq.attr,
760 &scaling_max_freq.attr,
761 &affected_cpus.attr,
762 &related_cpus.attr,
763 &scaling_governor.attr,
764 &scaling_driver.attr,
765 &scaling_available_governors.attr,
766 &scaling_setspeed.attr,
767 NULL
768 };
769
770 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
771 #define to_attr(a) container_of(a, struct freq_attr, attr)
772
773 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
774 {
775 struct cpufreq_policy *policy = to_policy(kobj);
776 struct freq_attr *fattr = to_attr(attr);
777 ssize_t ret;
778
779 down_read(&policy->rwsem);
780 ret = fattr->show(policy, buf);
781 up_read(&policy->rwsem);
782
783 return ret;
784 }
785
786 static ssize_t store(struct kobject *kobj, struct attribute *attr,
787 const char *buf, size_t count)
788 {
789 struct cpufreq_policy *policy = to_policy(kobj);
790 struct freq_attr *fattr = to_attr(attr);
791 ssize_t ret = -EINVAL;
792
793 get_online_cpus();
794
795 if (cpu_online(policy->cpu)) {
796 down_write(&policy->rwsem);
797 ret = fattr->store(policy, buf, count);
798 up_write(&policy->rwsem);
799 }
800
801 put_online_cpus();
802
803 return ret;
804 }
805
806 static void cpufreq_sysfs_release(struct kobject *kobj)
807 {
808 struct cpufreq_policy *policy = to_policy(kobj);
809 pr_debug("last reference is dropped\n");
810 complete(&policy->kobj_unregister);
811 }
812
813 static const struct sysfs_ops sysfs_ops = {
814 .show = show,
815 .store = store,
816 };
817
818 static struct kobj_type ktype_cpufreq = {
819 .sysfs_ops = &sysfs_ops,
820 .default_attrs = default_attrs,
821 .release = cpufreq_sysfs_release,
822 };
823
824 static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
825 {
826 struct device *cpu_dev;
827
828 pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);
829
830 if (!policy)
831 return 0;
832
833 cpu_dev = get_cpu_device(cpu);
834 if (WARN_ON(!cpu_dev))
835 return 0;
836
837 return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
838 }
839
840 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
841 {
842 struct device *cpu_dev;
843
844 pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);
845
846 cpu_dev = get_cpu_device(cpu);
847 if (WARN_ON(!cpu_dev))
848 return;
849
850 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
851 }
852
853 /* Add/remove symlinks for all related CPUs */
854 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
855 {
856 unsigned int j;
857 int ret = 0;
858
859 /* Some related CPUs might not be present (physically hotplugged) */
860 for_each_cpu(j, policy->real_cpus) {
861 ret = add_cpu_dev_symlink(policy, j);
862 if (ret)
863 break;
864 }
865
866 return ret;
867 }
868
869 static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
870 {
871 unsigned int j;
872
873 /* Some related CPUs might not be present (physically hotplugged) */
874 for_each_cpu(j, policy->real_cpus)
875 remove_cpu_dev_symlink(policy, j);
876 }
877
878 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
879 {
880 struct freq_attr **drv_attr;
881 int ret = 0;
882
883 /* set up files for this cpu device */
884 drv_attr = cpufreq_driver->attr;
885 while (drv_attr && *drv_attr) {
886 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
887 if (ret)
888 return ret;
889 drv_attr++;
890 }
891 if (cpufreq_driver->get) {
892 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
893 if (ret)
894 return ret;
895 }
896
897 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
898 if (ret)
899 return ret;
900
901 if (cpufreq_driver->bios_limit) {
902 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
903 if (ret)
904 return ret;
905 }
906
907 return cpufreq_add_dev_symlink(policy);
908 }
909
910 __weak struct cpufreq_governor *cpufreq_default_governor(void)
911 {
912 return NULL;
913 }
914
915 static int cpufreq_init_policy(struct cpufreq_policy *policy)
916 {
917 struct cpufreq_governor *gov = NULL;
918 struct cpufreq_policy new_policy;
919
920 memcpy(&new_policy, policy, sizeof(*policy));
921
922 /* Update governor of new_policy to the governor used before hotplug */
923 gov = find_governor(policy->last_governor);
924 if (gov) {
925 pr_debug("Restoring governor %s for cpu %d\n",
926 policy->governor->name, policy->cpu);
927 } else {
928 gov = cpufreq_default_governor();
929 if (!gov)
930 return -ENODATA;
931 }
932
933 new_policy.governor = gov;
934
935 /* Use the default policy if there is no last_policy. */
936 if (cpufreq_driver->setpolicy) {
937 if (policy->last_policy)
938 new_policy.policy = policy->last_policy;
939 else
940 cpufreq_parse_governor(gov->name, &new_policy.policy,
941 NULL);
942 }
943 /* set default policy */
944 return cpufreq_set_policy(policy, &new_policy);
945 }
946
947 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
948 {
949 int ret = 0;
950
951 /* Has this CPU been taken care of already? */
952 if (cpumask_test_cpu(cpu, policy->cpus))
953 return 0;
954
955 down_write(&policy->rwsem);
956 if (has_target()) {
957 ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
958 if (ret) {
959 pr_err("%s: Failed to stop governor\n", __func__);
960 goto unlock;
961 }
962 }
963
964 cpumask_set_cpu(cpu, policy->cpus);
965
966 if (has_target()) {
967 ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
968 if (!ret)
969 ret = cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
970
971 if (ret)
972 pr_err("%s: Failed to start governor\n", __func__);
973 }
974
975 unlock:
976 up_write(&policy->rwsem);
977 return ret;
978 }
979
980 static void handle_update(struct work_struct *work)
981 {
982 struct cpufreq_policy *policy =
983 container_of(work, struct cpufreq_policy, update);
984 unsigned int cpu = policy->cpu;
985 pr_debug("handle_update for cpu %u called\n", cpu);
986 cpufreq_update_policy(cpu);
987 }
988
989 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
990 {
991 struct device *dev = get_cpu_device(cpu);
992 struct cpufreq_policy *policy;
993 int ret;
994
995 if (WARN_ON(!dev))
996 return NULL;
997
998 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
999 if (!policy)
1000 return NULL;
1001
1002 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1003 goto err_free_policy;
1004
1005 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1006 goto err_free_cpumask;
1007
1008 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1009 goto err_free_rcpumask;
1010
1011 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1012 cpufreq_global_kobject, "policy%u", cpu);
1013 if (ret) {
1014 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1015 goto err_free_real_cpus;
1016 }
1017
1018 INIT_LIST_HEAD(&policy->policy_list);
1019 init_rwsem(&policy->rwsem);
1020 spin_lock_init(&policy->transition_lock);
1021 init_waitqueue_head(&policy->transition_wait);
1022 init_completion(&policy->kobj_unregister);
1023 INIT_WORK(&policy->update, handle_update);
1024
1025 policy->cpu = cpu;
1026 return policy;
1027
1028 err_free_real_cpus:
1029 free_cpumask_var(policy->real_cpus);
1030 err_free_rcpumask:
1031 free_cpumask_var(policy->related_cpus);
1032 err_free_cpumask:
1033 free_cpumask_var(policy->cpus);
1034 err_free_policy:
1035 kfree(policy);
1036
1037 return NULL;
1038 }
1039
1040 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
1041 {
1042 struct kobject *kobj;
1043 struct completion *cmp;
1044
1045 if (notify)
1046 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1047 CPUFREQ_REMOVE_POLICY, policy);
1048
1049 down_write(&policy->rwsem);
1050 cpufreq_remove_dev_symlink(policy);
1051 kobj = &policy->kobj;
1052 cmp = &policy->kobj_unregister;
1053 up_write(&policy->rwsem);
1054 kobject_put(kobj);
1055
1056 /*
1057 * We need to make sure that the underlying kobj is
1058 * actually not referenced anymore by anybody before we
1059 * proceed with unloading.
1060 */
1061 pr_debug("waiting for dropping of refcount\n");
1062 wait_for_completion(cmp);
1063 pr_debug("wait complete\n");
1064 }
1065
1066 static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1067 {
1068 unsigned long flags;
1069 int cpu;
1070
1071 /* Remove policy from list */
1072 write_lock_irqsave(&cpufreq_driver_lock, flags);
1073 list_del(&policy->policy_list);
1074
1075 for_each_cpu(cpu, policy->related_cpus)
1076 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1077 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1078
1079 cpufreq_policy_put_kobj(policy, notify);
1080 free_cpumask_var(policy->real_cpus);
1081 free_cpumask_var(policy->related_cpus);
1082 free_cpumask_var(policy->cpus);
1083 kfree(policy);
1084 }
1085
1086 static int cpufreq_online(unsigned int cpu)
1087 {
1088 struct cpufreq_policy *policy;
1089 bool new_policy;
1090 unsigned long flags;
1091 unsigned int j;
1092 int ret;
1093
1094 pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1095
1096 /* Check if this CPU already has a policy to manage it */
1097 policy = per_cpu(cpufreq_cpu_data, cpu);
1098 if (policy) {
1099 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1100 if (!policy_is_inactive(policy))
1101 return cpufreq_add_policy_cpu(policy, cpu);
1102
1103 /* This is the only online CPU for the policy. Start over. */
1104 new_policy = false;
1105 down_write(&policy->rwsem);
1106 policy->cpu = cpu;
1107 policy->governor = NULL;
1108 up_write(&policy->rwsem);
1109 } else {
1110 new_policy = true;
1111 policy = cpufreq_policy_alloc(cpu);
1112 if (!policy)
1113 return -ENOMEM;
1114 }
1115
1116 cpumask_copy(policy->cpus, cpumask_of(cpu));
1117
1118 /* call driver. From then on the cpufreq must be able
1119 * to accept all calls to ->verify and ->setpolicy for this CPU
1120 */
1121 ret = cpufreq_driver->init(policy);
1122 if (ret) {
1123 pr_debug("initialization failed\n");
1124 goto out_free_policy;
1125 }
1126
1127 down_write(&policy->rwsem);
1128
1129 if (new_policy) {
1130 /* related_cpus should at least include policy->cpus. */
1131 cpumask_copy(policy->related_cpus, policy->cpus);
1132 /* Remember CPUs present at the policy creation time. */
1133 cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
1134 }
1135
1136 /*
1137 * affected cpus must always be the one, which are online. We aren't
1138 * managing offline cpus here.
1139 */
1140 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1141
1142 if (new_policy) {
1143 policy->user_policy.min = policy->min;
1144 policy->user_policy.max = policy->max;
1145
1146 write_lock_irqsave(&cpufreq_driver_lock, flags);
1147 for_each_cpu(j, policy->related_cpus)
1148 per_cpu(cpufreq_cpu_data, j) = policy;
1149 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1150 }
1151
1152 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1153 policy->cur = cpufreq_driver->get(policy->cpu);
1154 if (!policy->cur) {
1155 pr_err("%s: ->get() failed\n", __func__);
1156 goto out_exit_policy;
1157 }
1158 }
1159
1160 /*
1161 * Sometimes boot loaders set CPU frequency to a value outside of
1162 * frequency table present with cpufreq core. In such cases CPU might be
1163 * unstable if it has to run on that frequency for long duration of time
1164 * and so its better to set it to a frequency which is specified in
1165 * freq-table. This also makes cpufreq stats inconsistent as
1166 * cpufreq-stats would fail to register because current frequency of CPU
1167 * isn't found in freq-table.
1168 *
1169 * Because we don't want this change to effect boot process badly, we go
1170 * for the next freq which is >= policy->cur ('cur' must be set by now,
1171 * otherwise we will end up setting freq to lowest of the table as 'cur'
1172 * is initialized to zero).
1173 *
1174 * We are passing target-freq as "policy->cur - 1" otherwise
1175 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1176 * equal to target-freq.
1177 */
1178 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1179 && has_target()) {
1180 /* Are we running at unknown frequency ? */
1181 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1182 if (ret == -EINVAL) {
1183 /* Warn user and fix it */
1184 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1185 __func__, policy->cpu, policy->cur);
1186 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1187 CPUFREQ_RELATION_L);
1188
1189 /*
1190 * Reaching here after boot in a few seconds may not
1191 * mean that system will remain stable at "unknown"
1192 * frequency for longer duration. Hence, a BUG_ON().
1193 */
1194 BUG_ON(ret);
1195 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1196 __func__, policy->cpu, policy->cur);
1197 }
1198 }
1199
1200 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1201 CPUFREQ_START, policy);
1202
1203 if (new_policy) {
1204 ret = cpufreq_add_dev_interface(policy);
1205 if (ret)
1206 goto out_exit_policy;
1207 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1208 CPUFREQ_CREATE_POLICY, policy);
1209
1210 write_lock_irqsave(&cpufreq_driver_lock, flags);
1211 list_add(&policy->policy_list, &cpufreq_policy_list);
1212 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1213 }
1214
1215 ret = cpufreq_init_policy(policy);
1216 if (ret) {
1217 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1218 __func__, cpu, ret);
1219 /* cpufreq_policy_free() will notify based on this */
1220 new_policy = false;
1221 goto out_exit_policy;
1222 }
1223
1224 up_write(&policy->rwsem);
1225
1226 kobject_uevent(&policy->kobj, KOBJ_ADD);
1227
1228 /* Callback for handling stuff after policy is ready */
1229 if (cpufreq_driver->ready)
1230 cpufreq_driver->ready(policy);
1231
1232 pr_debug("initialization complete\n");
1233
1234 return 0;
1235
1236 out_exit_policy:
1237 up_write(&policy->rwsem);
1238
1239 if (cpufreq_driver->exit)
1240 cpufreq_driver->exit(policy);
1241 out_free_policy:
1242 cpufreq_policy_free(policy, !new_policy);
1243 return ret;
1244 }
1245
1246 /**
1247 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1248 * @dev: CPU device.
1249 * @sif: Subsystem interface structure pointer (not used)
1250 */
1251 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1252 {
1253 unsigned cpu = dev->id;
1254 int ret;
1255
1256 dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1257
1258 if (cpu_online(cpu)) {
1259 ret = cpufreq_online(cpu);
1260 } else {
1261 /*
1262 * A hotplug notifier will follow and we will handle it as CPU
1263 * online then. For now, just create the sysfs link, unless
1264 * there is no policy or the link is already present.
1265 */
1266 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1267
1268 ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1269 ? add_cpu_dev_symlink(policy, cpu) : 0;
1270 }
1271
1272 return ret;
1273 }
1274
1275 static void cpufreq_offline(unsigned int cpu)
1276 {
1277 struct cpufreq_policy *policy;
1278 int ret;
1279
1280 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1281
1282 policy = cpufreq_cpu_get_raw(cpu);
1283 if (!policy) {
1284 pr_debug("%s: No cpu_data found\n", __func__);
1285 return;
1286 }
1287
1288 down_write(&policy->rwsem);
1289 if (has_target()) {
1290 ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1291 if (ret)
1292 pr_err("%s: Failed to stop governor\n", __func__);
1293 }
1294
1295 cpumask_clear_cpu(cpu, policy->cpus);
1296
1297 if (policy_is_inactive(policy)) {
1298 if (has_target())
1299 strncpy(policy->last_governor, policy->governor->name,
1300 CPUFREQ_NAME_LEN);
1301 else
1302 policy->last_policy = policy->policy;
1303 } else if (cpu == policy->cpu) {
1304 /* Nominate new CPU */
1305 policy->cpu = cpumask_any(policy->cpus);
1306 }
1307
1308 /* Start governor again for active policy */
1309 if (!policy_is_inactive(policy)) {
1310 if (has_target()) {
1311 ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
1312 if (!ret)
1313 ret = cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1314
1315 if (ret)
1316 pr_err("%s: Failed to start governor\n", __func__);
1317 }
1318
1319 goto unlock;
1320 }
1321
1322 if (cpufreq_driver->stop_cpu)
1323 cpufreq_driver->stop_cpu(policy);
1324
1325 /* If cpu is last user of policy, free policy */
1326 if (has_target()) {
1327 ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
1328 if (ret)
1329 pr_err("%s: Failed to exit governor\n", __func__);
1330 }
1331
1332 /*
1333 * Perform the ->exit() even during light-weight tear-down,
1334 * since this is a core component, and is essential for the
1335 * subsequent light-weight ->init() to succeed.
1336 */
1337 if (cpufreq_driver->exit) {
1338 cpufreq_driver->exit(policy);
1339 policy->freq_table = NULL;
1340 }
1341
1342 unlock:
1343 up_write(&policy->rwsem);
1344 }
1345
1346 /**
1347 * cpufreq_remove_dev - remove a CPU device
1348 *
1349 * Removes the cpufreq interface for a CPU device.
1350 */
1351 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1352 {
1353 unsigned int cpu = dev->id;
1354 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1355
1356 if (!policy)
1357 return;
1358
1359 if (cpu_online(cpu))
1360 cpufreq_offline(cpu);
1361
1362 cpumask_clear_cpu(cpu, policy->real_cpus);
1363 remove_cpu_dev_symlink(policy, cpu);
1364
1365 if (cpumask_empty(policy->real_cpus))
1366 cpufreq_policy_free(policy, true);
1367 }
1368
1369 /**
1370 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1371 * in deep trouble.
1372 * @policy: policy managing CPUs
1373 * @new_freq: CPU frequency the CPU actually runs at
1374 *
1375 * We adjust to current frequency first, and need to clean up later.
1376 * So either call to cpufreq_update_policy() or schedule handle_update()).
1377 */
1378 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1379 unsigned int new_freq)
1380 {
1381 struct cpufreq_freqs freqs;
1382
1383 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1384 policy->cur, new_freq);
1385
1386 freqs.old = policy->cur;
1387 freqs.new = new_freq;
1388
1389 cpufreq_freq_transition_begin(policy, &freqs);
1390 cpufreq_freq_transition_end(policy, &freqs, 0);
1391 }
1392
1393 /**
1394 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1395 * @cpu: CPU number
1396 *
1397 * This is the last known freq, without actually getting it from the driver.
1398 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1399 */
1400 unsigned int cpufreq_quick_get(unsigned int cpu)
1401 {
1402 struct cpufreq_policy *policy;
1403 unsigned int ret_freq = 0;
1404
1405 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1406 return cpufreq_driver->get(cpu);
1407
1408 policy = cpufreq_cpu_get(cpu);
1409 if (policy) {
1410 ret_freq = policy->cur;
1411 cpufreq_cpu_put(policy);
1412 }
1413
1414 return ret_freq;
1415 }
1416 EXPORT_SYMBOL(cpufreq_quick_get);
1417
1418 /**
1419 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1420 * @cpu: CPU number
1421 *
1422 * Just return the max possible frequency for a given CPU.
1423 */
1424 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1425 {
1426 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1427 unsigned int ret_freq = 0;
1428
1429 if (policy) {
1430 ret_freq = policy->max;
1431 cpufreq_cpu_put(policy);
1432 }
1433
1434 return ret_freq;
1435 }
1436 EXPORT_SYMBOL(cpufreq_quick_get_max);
1437
1438 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1439 {
1440 unsigned int ret_freq = 0;
1441
1442 if (!cpufreq_driver->get)
1443 return ret_freq;
1444
1445 ret_freq = cpufreq_driver->get(policy->cpu);
1446
1447 /* Updating inactive policies is invalid, so avoid doing that. */
1448 if (unlikely(policy_is_inactive(policy)))
1449 return ret_freq;
1450
1451 if (ret_freq && policy->cur &&
1452 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1453 /* verify no discrepancy between actual and
1454 saved value exists */
1455 if (unlikely(ret_freq != policy->cur)) {
1456 cpufreq_out_of_sync(policy, ret_freq);
1457 schedule_work(&policy->update);
1458 }
1459 }
1460
1461 return ret_freq;
1462 }
1463
1464 /**
1465 * cpufreq_get - get the current CPU frequency (in kHz)
1466 * @cpu: CPU number
1467 *
1468 * Get the CPU current (static) CPU frequency
1469 */
1470 unsigned int cpufreq_get(unsigned int cpu)
1471 {
1472 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1473 unsigned int ret_freq = 0;
1474
1475 if (policy) {
1476 down_read(&policy->rwsem);
1477 ret_freq = __cpufreq_get(policy);
1478 up_read(&policy->rwsem);
1479
1480 cpufreq_cpu_put(policy);
1481 }
1482
1483 return ret_freq;
1484 }
1485 EXPORT_SYMBOL(cpufreq_get);
1486
1487 static struct subsys_interface cpufreq_interface = {
1488 .name = "cpufreq",
1489 .subsys = &cpu_subsys,
1490 .add_dev = cpufreq_add_dev,
1491 .remove_dev = cpufreq_remove_dev,
1492 };
1493
1494 /*
1495 * In case platform wants some specific frequency to be configured
1496 * during suspend..
1497 */
1498 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1499 {
1500 int ret;
1501
1502 if (!policy->suspend_freq) {
1503 pr_debug("%s: suspend_freq not defined\n", __func__);
1504 return 0;
1505 }
1506
1507 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1508 policy->suspend_freq);
1509
1510 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1511 CPUFREQ_RELATION_H);
1512 if (ret)
1513 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1514 __func__, policy->suspend_freq, ret);
1515
1516 return ret;
1517 }
1518 EXPORT_SYMBOL(cpufreq_generic_suspend);
1519
1520 /**
1521 * cpufreq_suspend() - Suspend CPUFreq governors
1522 *
1523 * Called during system wide Suspend/Hibernate cycles for suspending governors
1524 * as some platforms can't change frequency after this point in suspend cycle.
1525 * Because some of the devices (like: i2c, regulators, etc) they use for
1526 * changing frequency are suspended quickly after this point.
1527 */
1528 void cpufreq_suspend(void)
1529 {
1530 struct cpufreq_policy *policy;
1531 int ret;
1532
1533 if (!cpufreq_driver)
1534 return;
1535
1536 if (!has_target())
1537 goto suspend;
1538
1539 pr_debug("%s: Suspending Governors\n", __func__);
1540
1541 for_each_active_policy(policy) {
1542 down_write(&policy->rwsem);
1543 ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1544 up_write(&policy->rwsem);
1545
1546 if (ret)
1547 pr_err("%s: Failed to stop governor for policy: %p\n",
1548 __func__, policy);
1549 else if (cpufreq_driver->suspend
1550 && cpufreq_driver->suspend(policy))
1551 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1552 policy);
1553 }
1554
1555 suspend:
1556 cpufreq_suspended = true;
1557 }
1558
1559 /**
1560 * cpufreq_resume() - Resume CPUFreq governors
1561 *
1562 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1563 * are suspended with cpufreq_suspend().
1564 */
1565 void cpufreq_resume(void)
1566 {
1567 struct cpufreq_policy *policy;
1568 int ret;
1569
1570 if (!cpufreq_driver)
1571 return;
1572
1573 cpufreq_suspended = false;
1574
1575 if (!has_target())
1576 return;
1577
1578 pr_debug("%s: Resuming Governors\n", __func__);
1579
1580 for_each_active_policy(policy) {
1581 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1582 pr_err("%s: Failed to resume driver: %p\n", __func__,
1583 policy);
1584 } else {
1585 down_write(&policy->rwsem);
1586 ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
1587 if (!ret)
1588 cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1589 up_write(&policy->rwsem);
1590
1591 if (ret)
1592 pr_err("%s: Failed to start governor for policy: %p\n",
1593 __func__, policy);
1594 }
1595 }
1596
1597 /*
1598 * schedule call cpufreq_update_policy() for first-online CPU, as that
1599 * wouldn't be hotplugged-out on suspend. It will verify that the
1600 * current freq is in sync with what we believe it to be.
1601 */
1602 policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1603 if (WARN_ON(!policy))
1604 return;
1605
1606 schedule_work(&policy->update);
1607 }
1608
1609 /**
1610 * cpufreq_get_current_driver - return current driver's name
1611 *
1612 * Return the name string of the currently loaded cpufreq driver
1613 * or NULL, if none.
1614 */
1615 const char *cpufreq_get_current_driver(void)
1616 {
1617 if (cpufreq_driver)
1618 return cpufreq_driver->name;
1619
1620 return NULL;
1621 }
1622 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1623
1624 /**
1625 * cpufreq_get_driver_data - return current driver data
1626 *
1627 * Return the private data of the currently loaded cpufreq
1628 * driver, or NULL if no cpufreq driver is loaded.
1629 */
1630 void *cpufreq_get_driver_data(void)
1631 {
1632 if (cpufreq_driver)
1633 return cpufreq_driver->driver_data;
1634
1635 return NULL;
1636 }
1637 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1638
1639 /*********************************************************************
1640 * NOTIFIER LISTS INTERFACE *
1641 *********************************************************************/
1642
1643 /**
1644 * cpufreq_register_notifier - register a driver with cpufreq
1645 * @nb: notifier function to register
1646 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1647 *
1648 * Add a driver to one of two lists: either a list of drivers that
1649 * are notified about clock rate changes (once before and once after
1650 * the transition), or a list of drivers that are notified about
1651 * changes in cpufreq policy.
1652 *
1653 * This function may sleep, and has the same return conditions as
1654 * blocking_notifier_chain_register.
1655 */
1656 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1657 {
1658 int ret;
1659
1660 if (cpufreq_disabled())
1661 return -EINVAL;
1662
1663 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1664
1665 switch (list) {
1666 case CPUFREQ_TRANSITION_NOTIFIER:
1667 ret = srcu_notifier_chain_register(
1668 &cpufreq_transition_notifier_list, nb);
1669 break;
1670 case CPUFREQ_POLICY_NOTIFIER:
1671 ret = blocking_notifier_chain_register(
1672 &cpufreq_policy_notifier_list, nb);
1673 break;
1674 default:
1675 ret = -EINVAL;
1676 }
1677
1678 return ret;
1679 }
1680 EXPORT_SYMBOL(cpufreq_register_notifier);
1681
1682 /**
1683 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1684 * @nb: notifier block to be unregistered
1685 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1686 *
1687 * Remove a driver from the CPU frequency notifier list.
1688 *
1689 * This function may sleep, and has the same return conditions as
1690 * blocking_notifier_chain_unregister.
1691 */
1692 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1693 {
1694 int ret;
1695
1696 if (cpufreq_disabled())
1697 return -EINVAL;
1698
1699 switch (list) {
1700 case CPUFREQ_TRANSITION_NOTIFIER:
1701 ret = srcu_notifier_chain_unregister(
1702 &cpufreq_transition_notifier_list, nb);
1703 break;
1704 case CPUFREQ_POLICY_NOTIFIER:
1705 ret = blocking_notifier_chain_unregister(
1706 &cpufreq_policy_notifier_list, nb);
1707 break;
1708 default:
1709 ret = -EINVAL;
1710 }
1711
1712 return ret;
1713 }
1714 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1715
1716
1717 /*********************************************************************
1718 * GOVERNORS *
1719 *********************************************************************/
1720
1721 /* Must set freqs->new to intermediate frequency */
1722 static int __target_intermediate(struct cpufreq_policy *policy,
1723 struct cpufreq_freqs *freqs, int index)
1724 {
1725 int ret;
1726
1727 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1728
1729 /* We don't need to switch to intermediate freq */
1730 if (!freqs->new)
1731 return 0;
1732
1733 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1734 __func__, policy->cpu, freqs->old, freqs->new);
1735
1736 cpufreq_freq_transition_begin(policy, freqs);
1737 ret = cpufreq_driver->target_intermediate(policy, index);
1738 cpufreq_freq_transition_end(policy, freqs, ret);
1739
1740 if (ret)
1741 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1742 __func__, ret);
1743
1744 return ret;
1745 }
1746
1747 static int __target_index(struct cpufreq_policy *policy,
1748 struct cpufreq_frequency_table *freq_table, int index)
1749 {
1750 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1751 unsigned int intermediate_freq = 0;
1752 int retval = -EINVAL;
1753 bool notify;
1754
1755 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1756 if (notify) {
1757 /* Handle switching to intermediate frequency */
1758 if (cpufreq_driver->get_intermediate) {
1759 retval = __target_intermediate(policy, &freqs, index);
1760 if (retval)
1761 return retval;
1762
1763 intermediate_freq = freqs.new;
1764 /* Set old freq to intermediate */
1765 if (intermediate_freq)
1766 freqs.old = freqs.new;
1767 }
1768
1769 freqs.new = freq_table[index].frequency;
1770 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1771 __func__, policy->cpu, freqs.old, freqs.new);
1772
1773 cpufreq_freq_transition_begin(policy, &freqs);
1774 }
1775
1776 retval = cpufreq_driver->target_index(policy, index);
1777 if (retval)
1778 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1779 retval);
1780
1781 if (notify) {
1782 cpufreq_freq_transition_end(policy, &freqs, retval);
1783
1784 /*
1785 * Failed after setting to intermediate freq? Driver should have
1786 * reverted back to initial frequency and so should we. Check
1787 * here for intermediate_freq instead of get_intermediate, in
1788 * case we haven't switched to intermediate freq at all.
1789 */
1790 if (unlikely(retval && intermediate_freq)) {
1791 freqs.old = intermediate_freq;
1792 freqs.new = policy->restore_freq;
1793 cpufreq_freq_transition_begin(policy, &freqs);
1794 cpufreq_freq_transition_end(policy, &freqs, 0);
1795 }
1796 }
1797
1798 return retval;
1799 }
1800
1801 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1802 unsigned int target_freq,
1803 unsigned int relation)
1804 {
1805 unsigned int old_target_freq = target_freq;
1806 struct cpufreq_frequency_table *freq_table;
1807 int index, retval;
1808
1809 if (cpufreq_disabled())
1810 return -ENODEV;
1811
1812 /* Make sure that target_freq is within supported range */
1813 if (target_freq > policy->max)
1814 target_freq = policy->max;
1815 if (target_freq < policy->min)
1816 target_freq = policy->min;
1817
1818 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1819 policy->cpu, target_freq, relation, old_target_freq);
1820
1821 /*
1822 * This might look like a redundant call as we are checking it again
1823 * after finding index. But it is left intentionally for cases where
1824 * exactly same freq is called again and so we can save on few function
1825 * calls.
1826 */
1827 if (target_freq == policy->cur)
1828 return 0;
1829
1830 /* Save last value to restore later on errors */
1831 policy->restore_freq = policy->cur;
1832
1833 if (cpufreq_driver->target)
1834 return cpufreq_driver->target(policy, target_freq, relation);
1835
1836 if (!cpufreq_driver->target_index)
1837 return -EINVAL;
1838
1839 freq_table = cpufreq_frequency_get_table(policy->cpu);
1840 if (unlikely(!freq_table)) {
1841 pr_err("%s: Unable to find freq_table\n", __func__);
1842 return -EINVAL;
1843 }
1844
1845 retval = cpufreq_frequency_table_target(policy, freq_table, target_freq,
1846 relation, &index);
1847 if (unlikely(retval)) {
1848 pr_err("%s: Unable to find matching freq\n", __func__);
1849 return retval;
1850 }
1851
1852 if (freq_table[index].frequency == policy->cur)
1853 return 0;
1854
1855 return __target_index(policy, freq_table, index);
1856 }
1857 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1858
1859 int cpufreq_driver_target(struct cpufreq_policy *policy,
1860 unsigned int target_freq,
1861 unsigned int relation)
1862 {
1863 int ret = -EINVAL;
1864
1865 down_write(&policy->rwsem);
1866
1867 ret = __cpufreq_driver_target(policy, target_freq, relation);
1868
1869 up_write(&policy->rwsem);
1870
1871 return ret;
1872 }
1873 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1874
1875 __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
1876 {
1877 return NULL;
1878 }
1879
1880 static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
1881 {
1882 int ret;
1883
1884 /* Don't start any governor operations if we are entering suspend */
1885 if (cpufreq_suspended)
1886 return 0;
1887 /*
1888 * Governor might not be initiated here if ACPI _PPC changed
1889 * notification happened, so check it.
1890 */
1891 if (!policy->governor)
1892 return -EINVAL;
1893
1894 if (policy->governor->max_transition_latency &&
1895 policy->cpuinfo.transition_latency >
1896 policy->governor->max_transition_latency) {
1897 struct cpufreq_governor *gov = cpufreq_fallback_governor();
1898
1899 if (gov) {
1900 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
1901 policy->governor->name, gov->name);
1902 policy->governor = gov;
1903 } else {
1904 return -EINVAL;
1905 }
1906 }
1907
1908 if (event == CPUFREQ_GOV_POLICY_INIT)
1909 if (!try_module_get(policy->governor->owner))
1910 return -EINVAL;
1911
1912 pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
1913
1914 ret = policy->governor->governor(policy, event);
1915
1916 if (!ret) {
1917 if (event == CPUFREQ_GOV_POLICY_INIT)
1918 policy->governor->initialized++;
1919 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1920 policy->governor->initialized--;
1921 }
1922
1923 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1924 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1925 module_put(policy->governor->owner);
1926
1927 return ret;
1928 }
1929
1930 int cpufreq_register_governor(struct cpufreq_governor *governor)
1931 {
1932 int err;
1933
1934 if (!governor)
1935 return -EINVAL;
1936
1937 if (cpufreq_disabled())
1938 return -ENODEV;
1939
1940 mutex_lock(&cpufreq_governor_mutex);
1941
1942 governor->initialized = 0;
1943 err = -EBUSY;
1944 if (!find_governor(governor->name)) {
1945 err = 0;
1946 list_add(&governor->governor_list, &cpufreq_governor_list);
1947 }
1948
1949 mutex_unlock(&cpufreq_governor_mutex);
1950 return err;
1951 }
1952 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1953
1954 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1955 {
1956 struct cpufreq_policy *policy;
1957 unsigned long flags;
1958
1959 if (!governor)
1960 return;
1961
1962 if (cpufreq_disabled())
1963 return;
1964
1965 /* clear last_governor for all inactive policies */
1966 read_lock_irqsave(&cpufreq_driver_lock, flags);
1967 for_each_inactive_policy(policy) {
1968 if (!strcmp(policy->last_governor, governor->name)) {
1969 policy->governor = NULL;
1970 strcpy(policy->last_governor, "\0");
1971 }
1972 }
1973 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1974
1975 mutex_lock(&cpufreq_governor_mutex);
1976 list_del(&governor->governor_list);
1977 mutex_unlock(&cpufreq_governor_mutex);
1978 return;
1979 }
1980 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1981
1982
1983 /*********************************************************************
1984 * POLICY INTERFACE *
1985 *********************************************************************/
1986
1987 /**
1988 * cpufreq_get_policy - get the current cpufreq_policy
1989 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1990 * is written
1991 *
1992 * Reads the current cpufreq policy.
1993 */
1994 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1995 {
1996 struct cpufreq_policy *cpu_policy;
1997 if (!policy)
1998 return -EINVAL;
1999
2000 cpu_policy = cpufreq_cpu_get(cpu);
2001 if (!cpu_policy)
2002 return -EINVAL;
2003
2004 memcpy(policy, cpu_policy, sizeof(*policy));
2005
2006 cpufreq_cpu_put(cpu_policy);
2007 return 0;
2008 }
2009 EXPORT_SYMBOL(cpufreq_get_policy);
2010
2011 /*
2012 * policy : current policy.
2013 * new_policy: policy to be set.
2014 */
2015 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2016 struct cpufreq_policy *new_policy)
2017 {
2018 struct cpufreq_governor *old_gov;
2019 int ret;
2020
2021 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2022 new_policy->cpu, new_policy->min, new_policy->max);
2023
2024 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2025
2026 /*
2027 * This check works well when we store new min/max freq attributes,
2028 * because new_policy is a copy of policy with one field updated.
2029 */
2030 if (new_policy->min > new_policy->max)
2031 return -EINVAL;
2032
2033 /* verify the cpu speed can be set within this limit */
2034 ret = cpufreq_driver->verify(new_policy);
2035 if (ret)
2036 return ret;
2037
2038 /* adjust if necessary - all reasons */
2039 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2040 CPUFREQ_ADJUST, new_policy);
2041
2042 /*
2043 * verify the cpu speed can be set within this limit, which might be
2044 * different to the first one
2045 */
2046 ret = cpufreq_driver->verify(new_policy);
2047 if (ret)
2048 return ret;
2049
2050 /* notification of the new policy */
2051 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2052 CPUFREQ_NOTIFY, new_policy);
2053
2054 policy->min = new_policy->min;
2055 policy->max = new_policy->max;
2056
2057 pr_debug("new min and max freqs are %u - %u kHz\n",
2058 policy->min, policy->max);
2059
2060 if (cpufreq_driver->setpolicy) {
2061 policy->policy = new_policy->policy;
2062 pr_debug("setting range\n");
2063 return cpufreq_driver->setpolicy(new_policy);
2064 }
2065
2066 if (new_policy->governor == policy->governor)
2067 goto out;
2068
2069 pr_debug("governor switch\n");
2070
2071 /* save old, working values */
2072 old_gov = policy->governor;
2073 /* end old governor */
2074 if (old_gov) {
2075 ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2076 if (ret) {
2077 /* This can happen due to race with other operations */
2078 pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
2079 __func__, old_gov->name, ret);
2080 return ret;
2081 }
2082
2083 ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2084 if (ret) {
2085 pr_err("%s: Failed to Exit Governor: %s (%d)\n",
2086 __func__, old_gov->name, ret);
2087 return ret;
2088 }
2089 }
2090
2091 /* start new governor */
2092 policy->governor = new_policy->governor;
2093 ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2094 if (!ret) {
2095 ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
2096 if (!ret)
2097 goto out;
2098
2099 cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2100 }
2101
2102 /* new governor failed, so re-start old one */
2103 pr_debug("starting governor %s failed\n", policy->governor->name);
2104 if (old_gov) {
2105 policy->governor = old_gov;
2106 if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
2107 policy->governor = NULL;
2108 else
2109 cpufreq_governor(policy, CPUFREQ_GOV_START);
2110 }
2111
2112 return ret;
2113
2114 out:
2115 pr_debug("governor: change or update limits\n");
2116 return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2117 }
2118
2119 /**
2120 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2121 * @cpu: CPU which shall be re-evaluated
2122 *
2123 * Useful for policy notifiers which have different necessities
2124 * at different times.
2125 */
2126 int cpufreq_update_policy(unsigned int cpu)
2127 {
2128 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2129 struct cpufreq_policy new_policy;
2130 int ret;
2131
2132 if (!policy)
2133 return -ENODEV;
2134
2135 down_write(&policy->rwsem);
2136
2137 pr_debug("updating policy for CPU %u\n", cpu);
2138 memcpy(&new_policy, policy, sizeof(*policy));
2139 new_policy.min = policy->user_policy.min;
2140 new_policy.max = policy->user_policy.max;
2141
2142 /*
2143 * BIOS might change freq behind our back
2144 * -> ask driver for current freq and notify governors about a change
2145 */
2146 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2147 new_policy.cur = cpufreq_driver->get(cpu);
2148 if (WARN_ON(!new_policy.cur)) {
2149 ret = -EIO;
2150 goto unlock;
2151 }
2152
2153 if (!policy->cur) {
2154 pr_debug("Driver did not initialize current freq\n");
2155 policy->cur = new_policy.cur;
2156 } else {
2157 if (policy->cur != new_policy.cur && has_target())
2158 cpufreq_out_of_sync(policy, new_policy.cur);
2159 }
2160 }
2161
2162 ret = cpufreq_set_policy(policy, &new_policy);
2163
2164 unlock:
2165 up_write(&policy->rwsem);
2166
2167 cpufreq_cpu_put(policy);
2168 return ret;
2169 }
2170 EXPORT_SYMBOL(cpufreq_update_policy);
2171
2172 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2173 unsigned long action, void *hcpu)
2174 {
2175 unsigned int cpu = (unsigned long)hcpu;
2176
2177 switch (action & ~CPU_TASKS_FROZEN) {
2178 case CPU_ONLINE:
2179 cpufreq_online(cpu);
2180 break;
2181
2182 case CPU_DOWN_PREPARE:
2183 cpufreq_offline(cpu);
2184 break;
2185
2186 case CPU_DOWN_FAILED:
2187 cpufreq_online(cpu);
2188 break;
2189 }
2190 return NOTIFY_OK;
2191 }
2192
2193 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2194 .notifier_call = cpufreq_cpu_callback,
2195 };
2196
2197 /*********************************************************************
2198 * BOOST *
2199 *********************************************************************/
2200 static int cpufreq_boost_set_sw(int state)
2201 {
2202 struct cpufreq_frequency_table *freq_table;
2203 struct cpufreq_policy *policy;
2204 int ret = -EINVAL;
2205
2206 for_each_active_policy(policy) {
2207 freq_table = cpufreq_frequency_get_table(policy->cpu);
2208 if (freq_table) {
2209 ret = cpufreq_frequency_table_cpuinfo(policy,
2210 freq_table);
2211 if (ret) {
2212 pr_err("%s: Policy frequency update failed\n",
2213 __func__);
2214 break;
2215 }
2216
2217 down_write(&policy->rwsem);
2218 policy->user_policy.max = policy->max;
2219 cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2220 up_write(&policy->rwsem);
2221 }
2222 }
2223
2224 return ret;
2225 }
2226
2227 int cpufreq_boost_trigger_state(int state)
2228 {
2229 unsigned long flags;
2230 int ret = 0;
2231
2232 if (cpufreq_driver->boost_enabled == state)
2233 return 0;
2234
2235 write_lock_irqsave(&cpufreq_driver_lock, flags);
2236 cpufreq_driver->boost_enabled = state;
2237 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2238
2239 ret = cpufreq_driver->set_boost(state);
2240 if (ret) {
2241 write_lock_irqsave(&cpufreq_driver_lock, flags);
2242 cpufreq_driver->boost_enabled = !state;
2243 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2244
2245 pr_err("%s: Cannot %s BOOST\n",
2246 __func__, state ? "enable" : "disable");
2247 }
2248
2249 return ret;
2250 }
2251
2252 static bool cpufreq_boost_supported(void)
2253 {
2254 return likely(cpufreq_driver) && cpufreq_driver->set_boost;
2255 }
2256
2257 static int create_boost_sysfs_file(void)
2258 {
2259 int ret;
2260
2261 ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2262 if (ret)
2263 pr_err("%s: cannot register global BOOST sysfs file\n",
2264 __func__);
2265
2266 return ret;
2267 }
2268
2269 static void remove_boost_sysfs_file(void)
2270 {
2271 if (cpufreq_boost_supported())
2272 sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2273 }
2274
2275 int cpufreq_enable_boost_support(void)
2276 {
2277 if (!cpufreq_driver)
2278 return -EINVAL;
2279
2280 if (cpufreq_boost_supported())
2281 return 0;
2282
2283 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2284
2285 /* This will get removed on driver unregister */
2286 return create_boost_sysfs_file();
2287 }
2288 EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
2289
2290 int cpufreq_boost_enabled(void)
2291 {
2292 return cpufreq_driver->boost_enabled;
2293 }
2294 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2295
2296 /*********************************************************************
2297 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2298 *********************************************************************/
2299
2300 /**
2301 * cpufreq_register_driver - register a CPU Frequency driver
2302 * @driver_data: A struct cpufreq_driver containing the values#
2303 * submitted by the CPU Frequency driver.
2304 *
2305 * Registers a CPU Frequency driver to this core code. This code
2306 * returns zero on success, -EEXIST when another driver got here first
2307 * (and isn't unregistered in the meantime).
2308 *
2309 */
2310 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2311 {
2312 unsigned long flags;
2313 int ret;
2314
2315 if (cpufreq_disabled())
2316 return -ENODEV;
2317
2318 if (!driver_data || !driver_data->verify || !driver_data->init ||
2319 !(driver_data->setpolicy || driver_data->target_index ||
2320 driver_data->target) ||
2321 (driver_data->setpolicy && (driver_data->target_index ||
2322 driver_data->target)) ||
2323 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2324 return -EINVAL;
2325
2326 pr_debug("trying to register driver %s\n", driver_data->name);
2327
2328 /* Protect against concurrent CPU online/offline. */
2329 get_online_cpus();
2330
2331 write_lock_irqsave(&cpufreq_driver_lock, flags);
2332 if (cpufreq_driver) {
2333 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2334 ret = -EEXIST;
2335 goto out;
2336 }
2337 cpufreq_driver = driver_data;
2338 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2339
2340 if (driver_data->setpolicy)
2341 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2342
2343 if (cpufreq_boost_supported()) {
2344 ret = create_boost_sysfs_file();
2345 if (ret)
2346 goto err_null_driver;
2347 }
2348
2349 ret = subsys_interface_register(&cpufreq_interface);
2350 if (ret)
2351 goto err_boost_unreg;
2352
2353 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2354 list_empty(&cpufreq_policy_list)) {
2355 /* if all ->init() calls failed, unregister */
2356 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2357 driver_data->name);
2358 goto err_if_unreg;
2359 }
2360
2361 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2362 pr_debug("driver %s up and running\n", driver_data->name);
2363
2364 out:
2365 put_online_cpus();
2366 return ret;
2367
2368 err_if_unreg:
2369 subsys_interface_unregister(&cpufreq_interface);
2370 err_boost_unreg:
2371 remove_boost_sysfs_file();
2372 err_null_driver:
2373 write_lock_irqsave(&cpufreq_driver_lock, flags);
2374 cpufreq_driver = NULL;
2375 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2376 goto out;
2377 }
2378 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2379
2380 /**
2381 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2382 *
2383 * Unregister the current CPUFreq driver. Only call this if you have
2384 * the right to do so, i.e. if you have succeeded in initialising before!
2385 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2386 * currently not initialised.
2387 */
2388 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2389 {
2390 unsigned long flags;
2391
2392 if (!cpufreq_driver || (driver != cpufreq_driver))
2393 return -EINVAL;
2394
2395 pr_debug("unregistering driver %s\n", driver->name);
2396
2397 /* Protect against concurrent cpu hotplug */
2398 get_online_cpus();
2399 subsys_interface_unregister(&cpufreq_interface);
2400 remove_boost_sysfs_file();
2401 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2402
2403 write_lock_irqsave(&cpufreq_driver_lock, flags);
2404
2405 cpufreq_driver = NULL;
2406
2407 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2408 put_online_cpus();
2409
2410 return 0;
2411 }
2412 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2413
2414 /*
2415 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2416 * or mutexes when secondary CPUs are halted.
2417 */
2418 static struct syscore_ops cpufreq_syscore_ops = {
2419 .shutdown = cpufreq_suspend,
2420 };
2421
2422 struct kobject *cpufreq_global_kobject;
2423 EXPORT_SYMBOL(cpufreq_global_kobject);
2424
2425 static int __init cpufreq_core_init(void)
2426 {
2427 if (cpufreq_disabled())
2428 return -ENODEV;
2429
2430 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2431 BUG_ON(!cpufreq_global_kobject);
2432
2433 register_syscore_ops(&cpufreq_syscore_ops);
2434
2435 return 0;
2436 }
2437 core_initcall(cpufreq_core_init);
This page took 0.081636 seconds and 5 git commands to generate.