/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

DEFINE_MUTEX(dbs_data_mutex);
EXPORT_SYMBOL_GPL(dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 *
 * If the new rate is smaller than the old, simply updating
 * dbs_data->sampling_rate might not be appropriate.  For example, if the
 * original sampling_rate was 1 second and the requested new rate is 10 ms
 * because the user needs an immediate reaction from the ondemand governor,
 * but is not sure whether a higher frequency will be required, the governor
 * may change the sampling rate too late, i.e. up to 1 second later.  Thus,
 * if we are reducing the sampling rate, we need to make the new value
 * effective immediately.
 *
 * On the other hand, if the new rate is larger than the old, we may evaluate
 * the load too soon, so it might be worth updating sample_delay_ns then as
 * well.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
			    size_t count)
{
	struct policy_dbs_info *policy_dbs;
	unsigned int rate;
	int ret;

	ret = sscanf(buf, "%u", &rate);
	if (ret != 1)
		return -EINVAL;

	dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		mutex_lock(&policy_dbs->timer_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.  If the returned value is too
		 * small, the sample will be taken immediately, but that isn't
		 * a problem, as we want the new rate to take effect
		 * immediately anyway.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but the difference should not be too big and it
		 * will be corrected next time a sample is taken, so it
		 * shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, dbs_data->sampling_rate);
		mutex_unlock(&policy_dbs->timer_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);
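
/*
 * Illustrative sketch (not part of the original file): a governor exposes a
 * tunable such as this one by wrapping it in a struct governor_attr, which
 * the governor_show()/governor_store() dispatchers below route to.  The
 * show_sampling_rate() helper here is hypothetical; __ATTR() is the generic
 * sysfs attribute initializer:
 *
 *	static ssize_t show_sampling_rate(struct dbs_data *dbs_data, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", dbs_data->sampling_rate);
 *	}
 *
 *	static struct governor_attr sampling_rate =
 *		__ATTR(sampling_rate, 0644, show_sampling_rate,
 *		       store_sampling_rate);
 */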

static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
{
	return container_of(kobj, struct dbs_data, kobj);
}

static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
	return container_of(attr, struct governor_attr, attr);
}

static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	if (gattr->show)
		ret = gattr->show(dbs_data, buf);

	return ret;
}

static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	mutex_lock(&dbs_data->mutex);

	if (gattr->store)
		ret = gattr->store(dbs_data, buf, count);

	mutex_unlock(&dbs_data->mutex);

	return ret;
}

/*
 * Sysfs Ops for accessing governor attributes.
 *
 * All show/store invocations for governor-specific sysfs attributes first
 * call the show/store callbacks below, which then invoke the attribute's
 * own callback.
 */
static const struct sysfs_ops governor_sysfs_ops = {
	.show	= governor_show,
	.store	= governor_store,
};

void dbs_check_cpu(struct cpufreq_policy *policy)
{
	int cpu = policy->cpu;
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int sampling_rate = dbs_data->sampling_rate;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0;
	unsigned int j;

	if (gov->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				gov->get_cpu_dbs_info_s(cpu);

		/*
		 * Sometimes, the ondemand governor uses an additional
		 * multiplier to give long delays. So apply this multiplier to
		 * the 'sampling_rate', so as to keep the wake-up-from-idle
		 * detection logic a bit conservative.
		 */
		sampling_rate *= od_dbs_info->rate_mult;
	}

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = gov->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (gov->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		if (cur_idle_time < j_cdbs->prev_cpu_idle)
			cur_idle_time = j_cdbs->prev_cpu_idle;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's utilization
		 * update handler would not have run during CPU-idle periods.
		 * Hence, an unusually large 'wall_time' (as compared to the
		 * sampling rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}

	gov->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);
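
/*
 * Worked example of the load computation above (illustrative numbers): if
 * 80000 us of wall time elapsed since the previous sample and 20000 us of
 * that was idle, then
 *
 *	load = 100 * (80000 - 20000) / 80000 = 75
 *
 * i.e. the CPU was busy for 75% of the window, and the maximum such load
 * across policy->cpus is what gov_check_cpu() acts on.
 */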

void gov_set_update_util(struct policy_dbs_info *policy_dbs,
			 unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	struct dbs_governor *gov = dbs_governor_of(policy);
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);

		cpufreq_set_update_util_data(cpu, &cdbs->update_util);
	}
}
EXPORT_SYMBOL_GPL(gov_set_update_util);

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_set_update_util_data(i, NULL);

	synchronize_rcu();
}

static void gov_cancel_work(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	/* Unhook the utilization update callbacks before draining the rest. */
	gov_clear_update_util(policy_dbs->policy);
	/* The handlers can no longer run, so flush whatever they queued up. */
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}

static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;
	unsigned int delay;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load and the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->timer_mutex);
	delay = gov->gov_dbs_timer(policy);
	policy_dbs->sample_delay_ns = jiffies_to_nsecs(delay);
	mutex_unlock(&policy_dbs->timer_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a
	 * stale sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work(&policy_dbs->work);
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned long util, unsigned long max)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	delta_ns = time - policy_dbs->last_sample_time;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right
	 * away at this point.  Otherwise, we need to ensure that only one of
	 * the CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared &&
	    !atomic_add_unless(&policy_dbs->work_count, 1, 1))
		return;

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}
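
/*
 * Memory-ordering sketch (illustrative, not part of the original file): the
 * smp_wmb() in dbs_work_handler() pairs with the smp_rmb() above, so that
 * once this handler observes work_in_progress == false, it also observes the
 * sample_delay_ns value written under timer_mutex:
 *
 *	dbs_work_handler()                dbs_update_util_handler()
 *	  sample_delay_ns = ...;            if (work_in_progress) return;
 *	  smp_wmb();                        smp_rmb();
 *	  work_in_progress = false;         read sample_delay_ns (fresh)
 */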

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for the common information for policy->cpus */
	policy_dbs = kzalloc(sizeof(*policy_dbs), GFP_KERNEL);
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->timer_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

		j_cdbs->policy_dbs = policy_dbs;
		j_cdbs->update_util.func = dbs_update_util_handler;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct cpufreq_policy *policy,
				 struct dbs_governor *gov)
{
	struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(policy->cpu);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	int j;

	mutex_destroy(&policy_dbs->timer_mutex);

	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	kfree(policy_dbs);
}

static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data = gov->gdbs_data;
	struct policy_dbs_info *policy_dbs;
	unsigned int latency;
	int ret;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		mutex_lock(&dbs_data->mutex);
		dbs_data->usage_count++;
		list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
		mutex_unlock(&dbs_data->mutex);

		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
	mutex_init(&dbs_data->mutex);

	ret = gov->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_policy_dbs_info;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
				      LATENCY_MULTIPLIER * latency);
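
	/*
	 * Worked example (illustrative numbers): a driver reporting a
	 * transition_latency of 10,000 ns gives latency = 10 us, so with
	 * LATENCY_MULTIPLIER (1000) the default sampling rate becomes
	 * max(min_sampling_rate, 10,000 us), i.e. a sample every 10 ms
	 * unless min_sampling_rate dominates.
	 */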

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy->governor_data = policy_dbs;

	policy_dbs->dbs_data = dbs_data;
	dbs_data->usage_count = 1;
	list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		return 0;

	/* Failure, so roll back. */
	pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data, !policy->governor->initialized);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy, gov);
	return ret;
}
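
/*
 * For reference (a sketch, not part of this file): get_governor_parent_kobj()
 * in cpufreq.c decides where the dbs_data kobject created above lands in
 * sysfs, roughly:
 *
 *	struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
 *	{
 *		if (policy && have_governor_per_policy())
 *			return &policy->kobj;
 *		else
 *			return cpufreq_global_kobject;
 *	}
 *
 * i.e. per-policy tunables appear under the policy's own sysfs directory,
 * while shared tunables live under /sys/devices/system/cpu/cpufreq/.
 */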

static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	int count;

	mutex_lock(&dbs_data->mutex);
	list_del(&policy_dbs->list);
	count = --dbs_data->usage_count;
	mutex_unlock(&dbs_data->mutex);

	if (!count) {
		kobject_put(&dbs_data->kobj);

		policy->governor_data = NULL;

		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data, policy->governor->initialized == 1);
		mutex_destroy(&dbs_data->mutex);
		kfree(dbs_data);
	} else {
		policy->governor_data = NULL;
	}

	free_policy_dbs_info(policy, gov);
	return 0;
}

static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;

	if (gov->governor == GOV_ONDEMAND) {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		io_busy = od_tuners->io_is_busy;
	}

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle =
			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		/*
		 * Seed prev_load from the cumulative wall/idle counters, so
		 * the first sample starts from the long-run average load.
		 */
		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
					   j_cdbs->prev_cpu_idle);
		j_cdbs->prev_load = 100 * prev_load /
				    (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	if (gov->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			gov->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = gov->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info = gov->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}

static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
	gov_cancel_work(policy);

	return 0;
}

static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->timer_mutex);
	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
	dbs_check_cpu(policy);
	mutex_unlock(&policy_dbs->timer_mutex);

	return 0;
}

int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
	int ret = -EINVAL;

	/* Serialize governor callbacks to block concurrent init/exit of the governor. */
	mutex_lock(&dbs_data_mutex);

	if (event == CPUFREQ_GOV_POLICY_INIT) {
		ret = cpufreq_governor_init(policy);
	} else if (policy->governor_data) {
		switch (event) {
		case CPUFREQ_GOV_POLICY_EXIT:
			ret = cpufreq_governor_exit(policy);
			break;
		case CPUFREQ_GOV_START:
			ret = cpufreq_governor_start(policy);
			break;
		case CPUFREQ_GOV_STOP:
			ret = cpufreq_governor_stop(policy);
			break;
		case CPUFREQ_GOV_LIMITS:
			ret = cpufreq_governor_limits(policy);
			break;
		}
	}

	mutex_unlock(&dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
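
/*
 * Usage sketch (illustrative, not part of this file): a governor built on
 * this common code embeds a struct cpufreq_governor in its struct
 * dbs_governor and points the ->governor callback at cpufreq_governor_dbs(),
 * along these lines (callback fields abridged, names hypothetical):
 *
 *	static struct dbs_governor my_dbs_gov = {
 *		.gov = {
 *			.name = "my_governor",
 *			.governor = cpufreq_governor_dbs,
 *			.owner = THIS_MODULE,
 *		},
 *		.governor = GOV_ONDEMAND,
 *		.init = my_init,
 *		.exit = my_exit,
 *		.gov_dbs_timer = my_dbs_timer,
 *		...
 *	};
 *
 * dbs_governor_of() then recovers the dbs_governor from policy->governor
 * via container_of().
 */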