/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
	if (have_governor_per_policy())
		return dbs_data->cdata->attr_group_gov_pol;
	else
		return dbs_data->cdata->attr_group_gov_sys;
}

void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	struct cpufreq_policy *policy;
	unsigned int sampling_rate;
	unsigned int max_load = 0;
	unsigned int ignore_nice;
	unsigned int j;

	if (dbs_data->cdata->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				dbs_data->cdata->get_cpu_dbs_info_s(cpu);

		/*
		 * Sometimes, the ondemand governor uses an additional
		 * multiplier to give long delays. So apply this multiplier to
		 * the 'sampling_rate', so as to keep the wake-up-from-idle
		 * detection logic a bit conservative.
		 */
		sampling_rate = od_tuners->sampling_rate;
		sampling_rate *= od_dbs_info->rate_mult;

		ignore_nice = od_tuners->ignore_nice_load;
	} else {
		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	}

	policy = cdbs->cur_policy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_common_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (dbs_data->cdata->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					 cdbs->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's deferrable
		 * timer would not have fired during CPU-idle periods. Hence
		 * an unusually large 'wall_time' (as compared to the sampling
		 * rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}

	dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
		unsigned int delay)
{
	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

	mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
}

void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
		unsigned int delay, bool all_cpus)
{
	int i;

	mutex_lock(&cpufreq_governor_lock);
	if (!policy->governor_enabled)
		goto out_unlock;

	if (!all_cpus) {
		/*
		 * Use raw_smp_processor_id() to avoid preemptible warnings.
		 * We know that this is only called with all_cpus == false from
		 * works that have been queued with *_work_on() functions and
		 * those works are canceled during CPU_DOWN_PREPARE so they
		 * can't possibly run on any other CPU.
		 */
		__gov_queue_work(raw_smp_processor_id(), dbs_data, delay);
	} else {
		for_each_cpu(i, policy->cpus)
			__gov_queue_work(i, dbs_data, delay);
	}

out_unlock:
	mutex_unlock(&cpufreq_governor_lock);
}
EXPORT_SYMBOL_GPL(gov_queue_work);

static inline void gov_cancel_work(struct dbs_data *dbs_data,
		struct cpufreq_policy *policy)
{
	struct cpu_dbs_common_info *cdbs;
	int i;

	for_each_cpu(i, policy->cpus) {
		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
		cancel_delayed_work_sync(&cdbs->work);
	}
}

/* Return true if we need to evaluate the CPU load again, false otherwise */
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
		unsigned int sampling_rate)
{
	if (policy_is_shared(cdbs->cur_policy)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);

		/* Do nothing if we have sampled recently */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;
		else
			cdbs->time_stamp = time_now;
	}

	return true;
}
EXPORT_SYMBOL_GPL(need_load_eval);

static void set_sampling_rate(struct dbs_data *dbs_data,
		unsigned int sampling_rate)
{
	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
		cs_tuners->sampling_rate = sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
		od_tuners->sampling_rate = sampling_rate;
	}
}

int cpufreq_governor_dbs(struct cpufreq_policy *policy,
		struct common_dbs_data *cdata, unsigned int event)
{
	struct dbs_data *dbs_data;
	struct od_cpu_dbs_info_s *od_dbs_info = NULL;
	struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
	struct od_ops *od_ops = NULL;
	struct od_dbs_tuners *od_tuners = NULL;
	struct cs_dbs_tuners *cs_tuners = NULL;
	struct cpu_dbs_common_info *cpu_cdbs;
	unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
	int io_busy = 0;
	int rc;

	if (have_governor_per_policy())
		dbs_data = policy->governor_data;
	else
		dbs_data = cdata->gdbs_data;

	WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(dbs_data);
		} else if (dbs_data) {
			dbs_data->usage_count++;
			policy->governor_data = dbs_data;
			return 0;
		}

		dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
		if (!dbs_data) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		dbs_data->cdata = cdata;
		dbs_data->usage_count = 1;
		rc = cdata->init(dbs_data);
		if (rc) {
			pr_err("%s: POLICY_INIT: init() failed\n", __func__);
			kfree(dbs_data);
			return rc;
		}

		if (!have_governor_per_policy())
			WARN_ON(cpufreq_get_global_kobject());

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr(dbs_data));
		if (rc) {
			cdata->exit(dbs_data);
			kfree(dbs_data);
			return rc;
		}

		policy->governor_data = dbs_data;

		/* policy latency is in ns. Convert it to us first */
		latency = policy->cpuinfo.transition_latency / 1000;
		if (latency == 0)
			latency = 1;

		/* Bring kernel and HW constraints together */
		dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
				MIN_LATENCY_MULTIPLIER * latency);
		set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
					latency * LATENCY_MULTIPLIER));

		if ((cdata->governor == GOV_CONSERVATIVE) &&
				(!policy->governor->initialized)) {
			struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;

			cpufreq_register_notifier(cs_ops->notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		if (!have_governor_per_policy())
			cdata->gdbs_data = dbs_data;

		return 0;
	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--dbs_data->usage_count) {
			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr(dbs_data));

			if (!have_governor_per_policy())
				cpufreq_put_global_kobject();

			if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
				(policy->governor->initialized == 1)) {
				struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;

				cpufreq_unregister_notifier(cs_ops->notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
			}

			cdata->exit(dbs_data);
			kfree(dbs_data);
			cdata->gdbs_data = NULL;
		}

		policy->governor_data = NULL;
		return 0;
	}

	cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		cs_tuners = dbs_data->tuners;
		cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	} else {
		od_tuners = dbs_data->tuners;
		od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
		sampling_rate = od_tuners->sampling_rate;
		ignore_nice = od_tuners->ignore_nice_load;
		od_ops = dbs_data->cdata->gov_ops;
		io_busy = od_tuners->io_is_busy;
	}

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!policy->cur)
			return -EINVAL;

		mutex_lock(&dbs_data->mutex);

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_common_info *j_cdbs =
				dbs_data->cdata->get_cpu_cdbs(j);
			unsigned int prev_load;

			j_cdbs->cpu = j;
			j_cdbs->cur_policy = policy;
			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
					       &j_cdbs->prev_cpu_wall, io_busy);

			prev_load = (unsigned int)
				(j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle);
			j_cdbs->prev_load = 100 * prev_load /
					(unsigned int) j_cdbs->prev_cpu_wall;

			if (ignore_nice)
				j_cdbs->prev_cpu_nice =
					kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			mutex_init(&j_cdbs->timer_mutex);
			INIT_DEFERRABLE_WORK(&j_cdbs->work,
					     dbs_data->cdata->gov_dbs_timer);
		}

		if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
			cs_dbs_info->down_skip = 0;
			cs_dbs_info->enable = 1;
			cs_dbs_info->requested_freq = policy->cur;
		} else {
			od_dbs_info->rate_mult = 1;
			od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
			od_ops->powersave_bias_init_cpu(cpu);
		}

		mutex_unlock(&dbs_data->mutex);

		/* Initialize the timer time stamp */
		cpu_cdbs->time_stamp = ktime_get();

		gov_queue_work(dbs_data, policy,
				delay_for_sampling_rate(sampling_rate), true);
		break;

	case CPUFREQ_GOV_STOP:
		if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
			cs_dbs_info->enable = 0;

		gov_cancel_work(dbs_data, policy);

		mutex_lock(&dbs_data->mutex);
		mutex_destroy(&cpu_cdbs->timer_mutex);
		cpu_cdbs->cur_policy = NULL;

		mutex_unlock(&dbs_data->mutex);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&dbs_data->mutex);
		if (!cpu_cdbs->cur_policy) {
			mutex_unlock(&dbs_data->mutex);
			break;
		}
		mutex_lock(&cpu_cdbs->timer_mutex);
		if (policy->max < cpu_cdbs->cur_policy->cur)
			__cpufreq_driver_target(cpu_cdbs->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > cpu_cdbs->cur_policy->cur)
			__cpufreq_driver_target(cpu_cdbs->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		dbs_check_cpu(dbs_data, cpu);
		mutex_unlock(&cpu_cdbs->timer_mutex);
		mutex_unlock(&dbs_data->mutex);
		break;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);