/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *           (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *           (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - update the sampling rate, effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate. For example, suppose the
 * original sampling_rate was 1 second and the requested new rate is 10 ms,
 * because the user wants an immediate reaction from the ondemand governor but
 * is not sure whether a higher frequency will be required. In that case the
 * governor may change the sampling rate too late, up to 1 second later. Thus,
 * if we are reducing the sampling rate, we need to make the new value
 * effective immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
                            size_t count)
{
        struct policy_dbs_info *policy_dbs;
        unsigned int rate;
        int ret;

        ret = sscanf(buf, "%u", &rate);
        if (ret != 1)
                return -EINVAL;

        dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

        /*
         * We are operating under dbs_data->mutex and so the list and its
         * entries can't be freed concurrently.
         */
        list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
                mutex_lock(&policy_dbs->timer_mutex);
                /*
                 * On 32-bit architectures this may race with the
                 * sample_delay_ns read in dbs_update_util_handler(), but that
                 * really doesn't matter. If the read returns a value that's
                 * too big, the sample will be skipped, but the next invocation
                 * of dbs_update_util_handler() (when the update has been
                 * completed) will take a sample.
                 *
                 * If this runs in parallel with dbs_work_handler(), we may end
                 * up overwriting the sample_delay_ns value that it has just
                 * written, but it will be corrected next time a sample is
                 * taken, so it shouldn't be significant.
                 */
                gov_update_sample_delay(policy_dbs, 0);
                mutex_unlock(&policy_dbs->timer_mutex);
        }

        return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);
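
/*
 * Worked example (illustrative numbers, not from the upstream file): if
 * min_sampling_rate happens to be 10000 us and a user writes "500" to the
 * sampling_rate attribute, the max() above clamps the stored value to
 * 10000 us. If the user instead writes "20000" while the old rate was
 * 1000000 us, the loop above zeroes sample_delay_ns for every policy using
 * these tunables, so the next utilization update takes a sample right away
 * instead of up to one old (1 second) period later.
 */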

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
        struct policy_dbs_info *policy_dbs;

        list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
                unsigned int j;

                for_each_cpu(j, policy_dbs->policy->cpus) {
                        struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                        j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall,
                                                                  dbs_data->io_is_busy);
                        if (dbs_data->ignore_nice_load)
                                j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
                }
        }
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);
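
/*
 * Illustrative sketch (not part of the upstream file, not compiled):
 * gov_update_cpu_data() is meant to be called from a governor tunable's store
 * callback after a setting that affects the per-CPU baselines (io_is_busy,
 * ignore_nice_load) has changed, with dbs_data->mutex held by the sysfs store
 * path. A store callback along these hypothetical lines would use it so:
 */
#if 0
static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
                                size_t count)
{
        unsigned int input;

        if (sscanf(buf, "%u", &input) != 1)
                return -EINVAL;

        dbs_data->io_is_busy = !!input;

        /* Re-evaluate prev_cpu_idle/prev_cpu_wall so the next sample is sane. */
        gov_update_cpu_data(dbs_data);
        return count;
}
#endif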

static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
{
        return container_of(kobj, struct dbs_data, kobj);
}

static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
        return container_of(attr, struct governor_attr, attr);
}

static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
                             char *buf)
{
        struct dbs_data *dbs_data = to_dbs_data(kobj);
        struct governor_attr *gattr = to_gov_attr(attr);

        return gattr->show(dbs_data, buf);
}

static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
                              const char *buf, size_t count)
{
        struct dbs_data *dbs_data = to_dbs_data(kobj);
        struct governor_attr *gattr = to_gov_attr(attr);
        int ret = -EBUSY;

        mutex_lock(&dbs_data->mutex);

        if (dbs_data->usage_count)
                ret = gattr->store(dbs_data, buf, count);

        mutex_unlock(&dbs_data->mutex);

        return ret;
}

/*
 * Sysfs Ops for accessing governor attributes.
 *
 * All show/store invocations for governor-specific sysfs attributes go through
 * the show/store callbacks below, which in turn invoke the attribute-specific
 * callback.
 */
static const struct sysfs_ops governor_sysfs_ops = {
        .show   = governor_show,
        .store  = governor_store,
};
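
/*
 * Illustrative sketch (not part of the upstream file, not compiled): a
 * governor exposes a tunable through governor_sysfs_ops by declaring a
 * struct governor_attr whose show/store callbacks take a struct dbs_data
 * pointer. Roughly (show_sampling_rate below is a hypothetical helper;
 * store_sampling_rate is the common helper defined above):
 */
#if 0
static ssize_t show_sampling_rate(struct dbs_data *dbs_data, char *buf)
{
        return sprintf(buf, "%u\n", dbs_data->sampling_rate);
}

static struct governor_attr sampling_rate =
        __ATTR(sampling_rate, 0644, show_sampling_rate, store_sampling_rate);
#endif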

unsigned int dbs_update(struct cpufreq_policy *policy)
{
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
        unsigned int ignore_nice = dbs_data->ignore_nice_load;
        unsigned int max_load = 0;
        unsigned int sampling_rate, io_busy, j;

        /*
         * Sometimes governors may use an additional multiplier to increase
         * sample delays temporarily. Apply that multiplier to sampling_rate
         * so as to keep the wake-up-from-idle detection logic a bit
         * conservative.
         */
        sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
        /*
         * For the purposes of ondemand, waiting for disk IO is an indication
         * that the CPU is performance-critical rather than actually idle, so
         * when io_is_busy is set, do not add the iowait time to the CPU idle
         * time.
         */
        io_busy = dbs_data->io_is_busy;

        /* Get Absolute Load */
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
                u64 cur_wall_time, cur_idle_time;
                unsigned int idle_time, wall_time;
                unsigned int load;

                cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

                wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
                j_cdbs->prev_cpu_wall = cur_wall_time;

                if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
                        idle_time = 0;
                } else {
                        idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
                        j_cdbs->prev_cpu_idle = cur_idle_time;
                }

                if (ignore_nice) {
                        u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

                        idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
                        j_cdbs->prev_cpu_nice = cur_nice;
                }

                if (unlikely(!wall_time || wall_time < idle_time))
                        continue;

                /*
                 * If the CPU had gone completely idle, and a task just woke up
                 * on this CPU now, it would be unfair to calculate 'load' the
                 * usual way for this elapsed time-window, because it will show
                 * near-zero load, irrespective of how CPU intensive that task
                 * actually is. This is undesirable for latency-sensitive bursty
                 * workloads.
                 *
                 * To avoid this, we reuse the 'load' from the previous
                 * time-window and give this task a chance to start with a
                 * reasonably high CPU frequency. (However, we shouldn't over-do
                 * this copy, lest we get stuck at a high load (high frequency)
                 * for too long, even when the current system load has actually
                 * dropped down. So we perform the copy only once, upon the
                 * first wake-up from idle.)
                 *
                 * Detecting this situation is easy: the governor's utilization
                 * update handler would not have run during CPU-idle periods.
                 * Hence, an unusually large 'wall_time' (as compared to the
                 * sampling rate) indicates this scenario.
                 *
                 * prev_load can be zero in two cases and we must recalculate it
                 * for both cases:
                 * - during long idle intervals
                 * - explicitly set to zero
                 */
                if (unlikely(wall_time > (2 * sampling_rate) &&
                             j_cdbs->prev_load)) {
                        load = j_cdbs->prev_load;

                        /*
                         * Perform a destructive copy, to ensure that we copy
                         * the previous load only once, upon the first wake-up
                         * from idle.
                         */
                        j_cdbs->prev_load = 0;
                } else {
                        load = 100 * (wall_time - idle_time) / wall_time;
                        j_cdbs->prev_load = load;
                }

                if (load > max_load)
                        max_load = load;
        }
        return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);
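
/*
 * Worked example (illustrative numbers, not from the upstream file): with a
 * 10000 us effective sampling period, suppose wall_time comes out as 10000 us
 * and idle_time as 7500 us for a CPU; then
 *
 *      load = 100 * (10000 - 7500) / 10000 = 25
 *
 * If that CPU then sleeps for a long time and wakes up, wall_time for the
 * first sample after the wake-up will exceed 2 * sampling_rate, so the stored
 * prev_load (25) is reported once instead of the misleading near-zero value,
 * and prev_load is cleared so the copy happens only once.
 */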

static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
                                unsigned int delay_us)
{
        struct cpufreq_policy *policy = policy_dbs->policy;
        int cpu;

        gov_update_sample_delay(policy_dbs, delay_us);
        policy_dbs->last_sample_time = 0;

        for_each_cpu(cpu, policy->cpus) {
                struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

                cpufreq_set_update_util_data(cpu, &cdbs->update_util);
        }
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
        int i;

        for_each_cpu(i, policy->cpus)
                cpufreq_set_update_util_data(i, NULL);

        synchronize_sched();
}

static void gov_cancel_work(struct cpufreq_policy *policy)
{
        struct policy_dbs_info *policy_dbs = policy->governor_data;

        gov_clear_update_util(policy_dbs->policy);
        irq_work_sync(&policy_dbs->irq_work);
        cancel_work_sync(&policy_dbs->work);
        atomic_set(&policy_dbs->work_count, 0);
        policy_dbs->work_in_progress = false;
}

static void dbs_work_handler(struct work_struct *work)
{
        struct policy_dbs_info *policy_dbs;
        struct cpufreq_policy *policy;
        struct dbs_governor *gov;

        policy_dbs = container_of(work, struct policy_dbs_info, work);
        policy = policy_dbs->policy;
        gov = dbs_governor_of(policy);

        /*
         * Make sure cpufreq_governor_limits() isn't evaluating load and that
         * the ondemand governor isn't updating the sampling rate in parallel.
         */
        mutex_lock(&policy_dbs->timer_mutex);
        gov_update_sample_delay(policy_dbs, gov->gov_dbs_timer(policy));
        mutex_unlock(&policy_dbs->timer_mutex);

        /* Allow the utilization update handler to queue up more work. */
        atomic_set(&policy_dbs->work_count, 0);
        /*
         * If the update below is reordered with respect to the sample delay
         * modification, the utilization update handler may end up using a stale
         * sample delay value.
         */
        smp_wmb();
        policy_dbs->work_in_progress = false;
}

static void dbs_irq_work(struct irq_work *irq_work)
{
        struct policy_dbs_info *policy_dbs;

        policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
        schedule_work_on(smp_processor_id(), &policy_dbs->work);
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
                                    unsigned long util, unsigned long max)
{
        struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
        struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
        u64 delta_ns, lst;

        /*
         * The work may not be allowed to be queued up right now.
         * Possible reasons:
         * - Work has already been queued up or is in progress.
         * - It is too early (too little time from the previous sample).
         */
        if (policy_dbs->work_in_progress)
                return;

        /*
         * If the reads below are reordered before the check above, the value
         * of sample_delay_ns used in the computation may be stale.
         */
        smp_rmb();
        lst = READ_ONCE(policy_dbs->last_sample_time);
        delta_ns = time - lst;
        if ((s64)delta_ns < policy_dbs->sample_delay_ns)
                return;

        /*
         * If the policy is not shared, the irq_work may be queued up right away
         * at this point. Otherwise, we need to ensure that only one of the
         * CPUs sharing the policy will do that.
         */
        if (policy_dbs->is_shared) {
                if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
                        return;

                /*
                 * If another CPU updated last_sample_time in the meantime, we
                 * shouldn't be here, so clear the work counter and bail out.
                 */
                if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
                        atomic_set(&policy_dbs->work_count, 0);
                        return;
                }
        }

        policy_dbs->last_sample_time = time;
        policy_dbs->work_in_progress = true;
        irq_work_queue(&policy_dbs->irq_work);
}

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
                                                     struct dbs_governor *gov)
{
        struct policy_dbs_info *policy_dbs;
        int j;

        /* Allocate memory for per-policy governor data. */
        policy_dbs = gov->alloc();
        if (!policy_dbs)
                return NULL;

        policy_dbs->policy = policy;
        mutex_init(&policy_dbs->timer_mutex);
        atomic_set(&policy_dbs->work_count, 0);
        init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
        INIT_WORK(&policy_dbs->work, dbs_work_handler);

        /* Set policy_dbs for all CPUs, online+offline */
        for_each_cpu(j, policy->related_cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                j_cdbs->policy_dbs = policy_dbs;
                j_cdbs->update_util.func = dbs_update_util_handler;
        }
        return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
                                 struct dbs_governor *gov)
{
        int j;

        mutex_destroy(&policy_dbs->timer_mutex);

        for_each_cpu(j, policy_dbs->policy->related_cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                j_cdbs->policy_dbs = NULL;
                j_cdbs->update_util.func = NULL;
        }
        gov->free(policy_dbs);
}

static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct dbs_data *dbs_data;
        struct policy_dbs_info *policy_dbs;
        unsigned int latency;
        int ret = 0;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        policy_dbs = alloc_policy_dbs_info(policy, gov);
        if (!policy_dbs)
                return -ENOMEM;

        /* Protect gov->gdbs_data against concurrent updates. */
        mutex_lock(&gov_dbs_data_mutex);

        dbs_data = gov->gdbs_data;
        if (dbs_data) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto free_policy_dbs_info;
                }
                policy_dbs->dbs_data = dbs_data;
                policy->governor_data = policy_dbs;

                mutex_lock(&dbs_data->mutex);
                dbs_data->usage_count++;
                list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
                mutex_unlock(&dbs_data->mutex);
                goto out;
        }

        dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
        if (!dbs_data) {
                ret = -ENOMEM;
                goto free_policy_dbs_info;
        }

        INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
        mutex_init(&dbs_data->mutex);

        ret = gov->init(dbs_data, !policy->governor->initialized);
        if (ret)
                goto free_policy_dbs_info;

        /* policy latency is in ns. Convert it to us first */
        latency = policy->cpuinfo.transition_latency / 1000;
        if (latency == 0)
                latency = 1;

        /* Bring kernel and HW constraints together */
        dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
                                          MIN_LATENCY_MULTIPLIER * latency);
        dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
                                      LATENCY_MULTIPLIER * latency);

        if (!have_governor_per_policy())
                gov->gdbs_data = dbs_data;

        policy->governor_data = policy_dbs;

        policy_dbs->dbs_data = dbs_data;
        dbs_data->usage_count = 1;
        list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);

        gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
        ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
                                   get_governor_parent_kobj(policy),
                                   "%s", gov->gov.name);
        if (!ret)
                goto out;

        /* Failure, so roll back. */
        pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);

        policy->governor_data = NULL;

        if (!have_governor_per_policy())
                gov->gdbs_data = NULL;
        gov->exit(dbs_data, !policy->governor->initialized);
        kfree(dbs_data);

free_policy_dbs_info:
        free_policy_dbs_info(policy_dbs, gov);

out:
        mutex_unlock(&gov_dbs_data_mutex);
        return ret;
}
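
/*
 * Worked example (illustrative numbers, not from the upstream file): assuming
 * a driver that reports a 10000 ns transition latency, latency above becomes
 * 10 us. With the usual multipliers (MIN_LATENCY_MULTIPLIER of 20 and
 * LATENCY_MULTIPLIER of 1000), min_sampling_rate is raised to at least
 * 20 * 10 = 200 us and the default sampling_rate to at least
 * 1000 * 10 = 10000 us (10 ms), unless the governor's init() callback already
 * set larger values.
 */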

static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
        int count;

        /* Protect gov->gdbs_data against concurrent updates. */
        mutex_lock(&gov_dbs_data_mutex);

        mutex_lock(&dbs_data->mutex);
        list_del(&policy_dbs->list);
        count = --dbs_data->usage_count;
        mutex_unlock(&dbs_data->mutex);

        if (!count) {
                kobject_put(&dbs_data->kobj);

                policy->governor_data = NULL;

                if (!have_governor_per_policy())
                        gov->gdbs_data = NULL;

                gov->exit(dbs_data, policy->governor->initialized == 1);
                mutex_destroy(&dbs_data->mutex);
                kfree(dbs_data);
        } else {
                policy->governor_data = NULL;
        }

        free_policy_dbs_info(policy_dbs, gov);

        mutex_unlock(&gov_dbs_data_mutex);
        return 0;
}

static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
        unsigned int sampling_rate, ignore_nice, j;
        unsigned int io_busy;

        if (!policy->cur)
                return -EINVAL;

        policy_dbs->is_shared = policy_is_shared(policy);
        policy_dbs->rate_mult = 1;

        sampling_rate = dbs_data->sampling_rate;
        ignore_nice = dbs_data->ignore_nice_load;
        io_busy = dbs_data->io_is_busy;

        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
                unsigned int prev_load;

                j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

                prev_load = j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle;
                j_cdbs->prev_load = 100 * prev_load / (unsigned int)j_cdbs->prev_cpu_wall;

                if (ignore_nice)
                        j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
        }

        gov->start(policy);

        gov_set_update_util(policy_dbs, sampling_rate);
        return 0;
}

static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
        gov_cancel_work(policy);
        return 0;
}

static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
        struct policy_dbs_info *policy_dbs = policy->governor_data;

        mutex_lock(&policy_dbs->timer_mutex);

        if (policy->max < policy->cur)
                __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
        else if (policy->min > policy->cur)
                __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);

        gov_update_sample_delay(policy_dbs, 0);

        mutex_unlock(&policy_dbs->timer_mutex);

        return 0;
}

int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
        if (event == CPUFREQ_GOV_POLICY_INIT) {
                return cpufreq_governor_init(policy);
        } else if (policy->governor_data) {
                switch (event) {
                case CPUFREQ_GOV_POLICY_EXIT:
                        return cpufreq_governor_exit(policy);
                case CPUFREQ_GOV_START:
                        return cpufreq_governor_start(policy);
                case CPUFREQ_GOV_STOP:
                        return cpufreq_governor_stop(policy);
                case CPUFREQ_GOV_LIMITS:
                        return cpufreq_governor_limits(policy);
                }
        }
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
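
/*
 * Illustrative sketch (not part of the upstream file, not compiled): a
 * governor built on this common code fills in a struct dbs_governor and
 * routes all cpufreq events to cpufreq_governor_dbs(). The kobj_type's
 * sysfs_ops is filled in by cpufreq_governor_init() above. All callback and
 * attribute names below are hypothetical:
 */
#if 0
static struct dbs_governor example_dbs_gov = {
        .gov = {
                .name                   = "example",
                .governor               = cpufreq_governor_dbs,
                .max_transition_latency = TRANSITION_LATENCY_LIMIT,
                .owner                  = THIS_MODULE,
        },
        .kobj_type      = { .default_attrs = example_attributes },
        .gov_dbs_timer  = example_dbs_timer,
        .alloc          = example_alloc,
        .free           = example_free,
        .init           = example_init,
        .exit           = example_exit,
        .start          = example_start,
};
#endif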