Commit | Line | Data |
---|---|---|
e1f60b29 NM |
1 | /* |
2 | * Generic OPP Interface | |
3 | * | |
4 | * Copyright (C) 2009-2010 Texas Instruments Incorporated. | |
5 | * Nishanth Menon | |
6 | * Romit Dasgupta | |
7 | * Kevin Hilman | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as | |
11 | * published by the Free Software Foundation. | |
12 | */ | |
13 | ||
8d4d4e98 | 14 | #include <linux/cpu.h> |
e1f60b29 NM |
15 | #include <linux/kernel.h> |
16 | #include <linux/errno.h> | |
17 | #include <linux/err.h> | |
e1f60b29 | 18 | #include <linux/slab.h> |
51990e82 | 19 | #include <linux/device.h> |
e1f60b29 NM |
20 | #include <linux/list.h> |
21 | #include <linux/rculist.h> | |
22 | #include <linux/rcupdate.h> | |
e4db1c74 | 23 | #include <linux/pm_opp.h> |
b496dfbc | 24 | #include <linux/of.h> |
80126ce7 | 25 | #include <linux/export.h> |
e1f60b29 NM |
26 | |
27 | /* | |
28 | * Internal data structure organization with the OPP layer library is as | |
29 | * follows: | |
30 | * dev_opp_list (root) | |
31 | * |- device 1 (represents voltage domain 1) | |
32 | * | |- opp 1 (availability, freq, voltage) | |
33 | * | |- opp 2 .. | |
34 | * ... ... | |
35 | * | `- opp n .. | |
36 | * |- device 2 (represents the next voltage domain) | |
37 | * ... | |
38 | * `- device m (represents mth voltage domain) | |
39 | * device 1, 2.. are represented by dev_opp structure while each opp | |
40 | * is represented by the opp structure. | |
41 | */ | |
42 | ||
/**
 * struct dev_pm_opp - Generic OPP description structure
 * @node:	opp list node. The nodes are maintained throughout the lifetime
 *		of boot. It is expected only an optimal set of OPPs are
 *		added to the library by the SoC framework.
 *		RCU usage: opp list is traversed with RCU locks. node
 *		modification is possible realtime, hence the modifications
 *		are protected by the dev_opp_list_lock for integrity.
 *		IMPORTANT: the opp nodes should be maintained in increasing
 *		order.
 * @dynamic:	not-created from static DT entries.
 * @available:	true/false - marks if this OPP as available or not
 * @turbo:	true if turbo (boost) OPP
 * @rate:	Frequency in hertz
 * @u_volt:	Target voltage in microvolts corresponding to this OPP
 * @u_volt_min:	Minimum voltage in microvolts corresponding to this OPP
 * @u_volt_max:	Maximum voltage in microvolts corresponding to this OPP
 * @u_amp:	Maximum current drawn by the device in microamperes
 * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
 *		frequency from any other OPP's frequency.
 * @dev_opp:	points back to the device_opp struct this opp belongs to
 * @rcu_head:	RCU callback head used for deferred freeing
 * @np:		OPP's device node.
 *
 * This structure stores the OPP information for a given device.
 */
struct dev_pm_opp {
	struct list_head node;

	bool available;
	bool dynamic;
	bool turbo;
	unsigned long rate;

	unsigned long u_volt;
	unsigned long u_volt_min;
	unsigned long u_volt_max;
	unsigned long u_amp;
	unsigned long clock_latency_ns;

	struct device_opp *dev_opp;
	struct rcu_head rcu_head;

	struct device_node *np;
};

/**
 * struct device_list_opp - devices managed by 'struct device_opp'
 * @node:	list node
 * @dev:	device to which the struct object belongs
 * @rcu_head:	RCU callback head used for deferred freeing
 *
 * This is an internal data structure maintaining the list of devices that are
 * managed by 'struct device_opp'.
 */
struct device_list_opp {
	struct list_head node;
	const struct device *dev;
	struct rcu_head rcu_head;
};

/**
 * struct device_opp - Device opp structure
 * @node:	list node - contains the devices with OPPs that
 *		have been registered. Nodes once added are not modified in this
 *		list.
 *		RCU usage: nodes are not modified in the list of device_opp,
 *		however addition is possible and is secured by dev_opp_list_lock
 * @srcu_head:	notifier head to notify the OPP availability changes.
 * @rcu_head:	RCU callback head used for deferred freeing
 * @dev_list:	list of devices that share these OPPs
 * @opp_list:	list of opps
 * @np:		struct device_node pointer for opp's DT node.
 * @clock_latency_ns_max: Max clock latency (nanoseconds) of all OPPs in
 *		@opp_list.
 * @shared_opp:	OPP is shared between multiple devices.
 * @suspend_opp: Pointer to the OPP (in @opp_list) to be used during device
 *		suspend, if any.
 *
 * This is an internal data structure maintaining the link to opps attached to
 * a device. This structure is not meant to be shared to users as it is
 * meant for book keeping and private to OPP library.
 *
 * Because the opp structures can be used from both rcu and srcu readers, we
 * need to wait for the grace period of both of them before freeing any
 * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
 */
struct device_opp {
	struct list_head node;

	struct srcu_notifier_head srcu_head;
	struct rcu_head rcu_head;
	struct list_head dev_list;
	struct list_head opp_list;

	struct device_node *np;
	unsigned long clock_latency_ns_max;
	bool shared_opp;
	struct dev_pm_opp *suspend_opp;
};

/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opp it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);

/*
 * Warn (under lockdep) if the caller holds neither rcu_read_lock() nor the
 * dev_opp_list_lock mutex: one of the two is required to safely traverse the
 * RCU-protected OPP lists.
 */
#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
				!lockdep_is_held(&dev_opp_list_lock),	\
			   "Missing rcu_read_lock() or "		\
			   "dev_opp_list_lock protection");		\
} while (0)

06441658 VK |
157 | static struct device_list_opp *_find_list_dev(const struct device *dev, |
158 | struct device_opp *dev_opp) | |
159 | { | |
160 | struct device_list_opp *list_dev; | |
161 | ||
162 | list_for_each_entry(list_dev, &dev_opp->dev_list, node) | |
163 | if (list_dev->dev == dev) | |
164 | return list_dev; | |
165 | ||
166 | return NULL; | |
167 | } | |
168 | ||
169 | static struct device_opp *_managed_opp(const struct device_node *np) | |
170 | { | |
171 | struct device_opp *dev_opp; | |
172 | ||
173 | list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) { | |
174 | if (dev_opp->np == np) { | |
175 | /* | |
176 | * Multiple devices can point to the same OPP table and | |
177 | * so will have same node-pointer, np. | |
178 | * | |
179 | * But the OPPs will be considered as shared only if the | |
180 | * OPP table contains a "opp-shared" property. | |
181 | */ | |
182 | return dev_opp->shared_opp ? dev_opp : NULL; | |
183 | } | |
184 | } | |
185 | ||
186 | return NULL; | |
187 | } | |
188 | ||
e1f60b29 | 189 | /** |
327854c8 | 190 | * _find_device_opp() - find device_opp struct using device pointer |
e1f60b29 NM |
191 | * @dev: device pointer used to lookup device OPPs |
192 | * | |
193 | * Search list of device OPPs for one containing matching device. Does a RCU | |
194 | * reader operation to grab the pointer needed. | |
195 | * | |
984f16c8 | 196 | * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or |
e1f60b29 NM |
197 | * -EINVAL based on type of error. |
198 | * | |
199 | * Locking: This function must be called under rcu_read_lock(). device_opp | |
200 | * is a RCU protected pointer. This means that device_opp is valid as long | |
201 | * as we are under RCU lock. | |
202 | */ | |
327854c8 | 203 | static struct device_opp *_find_device_opp(struct device *dev) |
e1f60b29 | 204 | { |
06441658 | 205 | struct device_opp *dev_opp; |
e1f60b29 | 206 | |
50a3cb04 | 207 | if (IS_ERR_OR_NULL(dev)) { |
e1f60b29 NM |
208 | pr_err("%s: Invalid parameters\n", __func__); |
209 | return ERR_PTR(-EINVAL); | |
210 | } | |
211 | ||
06441658 VK |
212 | list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) |
213 | if (_find_list_dev(dev, dev_opp)) | |
214 | return dev_opp; | |
e1f60b29 | 215 | |
06441658 | 216 | return ERR_PTR(-ENODEV); |
e1f60b29 NM |
217 | } |
218 | ||
219 | /** | |
5d4879cd | 220 | * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp |
e1f60b29 NM |
221 | * @opp: opp for which voltage has to be returned for |
222 | * | |
984f16c8 | 223 | * Return: voltage in micro volt corresponding to the opp, else |
e1f60b29 NM |
224 | * return 0 |
225 | * | |
226 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | |
227 | * protected pointer. This means that opp which could have been fetched by | |
228 | * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are | |
229 | * under RCU lock. The pointer returned by the opp_find_freq family must be | |
230 | * used in the same section as the usage of this function with the pointer | |
231 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | |
232 | * pointer. | |
233 | */ | |
47d43ba7 | 234 | unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) |
e1f60b29 | 235 | { |
47d43ba7 | 236 | struct dev_pm_opp *tmp_opp; |
e1f60b29 NM |
237 | unsigned long v = 0; |
238 | ||
04bf1c7f KK |
239 | opp_rcu_lockdep_assert(); |
240 | ||
e1f60b29 | 241 | tmp_opp = rcu_dereference(opp); |
50a3cb04 | 242 | if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) |
e1f60b29 NM |
243 | pr_err("%s: Invalid parameters\n", __func__); |
244 | else | |
245 | v = tmp_opp->u_volt; | |
246 | ||
247 | return v; | |
248 | } | |
5d4879cd | 249 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage); |
e1f60b29 NM |
250 | |
251 | /** | |
5d4879cd | 252 | * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp |
e1f60b29 NM |
253 | * @opp: opp for which frequency has to be returned for |
254 | * | |
984f16c8 | 255 | * Return: frequency in hertz corresponding to the opp, else |
e1f60b29 NM |
256 | * return 0 |
257 | * | |
258 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | |
259 | * protected pointer. This means that opp which could have been fetched by | |
260 | * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are | |
261 | * under RCU lock. The pointer returned by the opp_find_freq family must be | |
262 | * used in the same section as the usage of this function with the pointer | |
263 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | |
264 | * pointer. | |
265 | */ | |
47d43ba7 | 266 | unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) |
e1f60b29 | 267 | { |
47d43ba7 | 268 | struct dev_pm_opp *tmp_opp; |
e1f60b29 NM |
269 | unsigned long f = 0; |
270 | ||
04bf1c7f KK |
271 | opp_rcu_lockdep_assert(); |
272 | ||
e1f60b29 | 273 | tmp_opp = rcu_dereference(opp); |
50a3cb04 | 274 | if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) |
e1f60b29 NM |
275 | pr_err("%s: Invalid parameters\n", __func__); |
276 | else | |
277 | f = tmp_opp->rate; | |
278 | ||
279 | return f; | |
280 | } | |
5d4879cd | 281 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); |
e1f60b29 | 282 | |
19445b25 BZ |
283 | /** |
284 | * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not | |
285 | * @opp: opp for which turbo mode is being verified | |
286 | * | |
287 | * Turbo OPPs are not for normal use, and can be enabled (under certain | |
288 | * conditions) for short duration of times to finish high throughput work | |
289 | * quickly. Running on them for longer times may overheat the chip. | |
290 | * | |
291 | * Return: true if opp is turbo opp, else false. | |
292 | * | |
293 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | |
294 | * protected pointer. This means that opp which could have been fetched by | |
295 | * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are | |
296 | * under RCU lock. The pointer returned by the opp_find_freq family must be | |
297 | * used in the same section as the usage of this function with the pointer | |
298 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | |
299 | * pointer. | |
300 | */ | |
301 | bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) | |
302 | { | |
303 | struct dev_pm_opp *tmp_opp; | |
304 | ||
305 | opp_rcu_lockdep_assert(); | |
306 | ||
307 | tmp_opp = rcu_dereference(opp); | |
308 | if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) { | |
309 | pr_err("%s: Invalid parameters\n", __func__); | |
310 | return false; | |
311 | } | |
312 | ||
313 | return tmp_opp->turbo; | |
314 | } | |
315 | EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); | |
316 | ||
3ca9bb33 VK |
317 | /** |
318 | * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds | |
319 | * @dev: device for which we do this operation | |
320 | * | |
321 | * Return: This function returns the max clock latency in nanoseconds. | |
322 | * | |
323 | * Locking: This function takes rcu_read_lock(). | |
324 | */ | |
325 | unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) | |
326 | { | |
327 | struct device_opp *dev_opp; | |
328 | unsigned long clock_latency_ns; | |
329 | ||
330 | rcu_read_lock(); | |
331 | ||
332 | dev_opp = _find_device_opp(dev); | |
333 | if (IS_ERR(dev_opp)) | |
334 | clock_latency_ns = 0; | |
335 | else | |
336 | clock_latency_ns = dev_opp->clock_latency_ns_max; | |
337 | ||
338 | rcu_read_unlock(); | |
339 | return clock_latency_ns; | |
340 | } | |
341 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); | |
342 | ||
4eafbd15 BZ |
343 | /** |
344 | * dev_pm_opp_get_suspend_opp() - Get suspend opp | |
345 | * @dev: device for which we do this operation | |
346 | * | |
347 | * Return: This function returns pointer to the suspend opp if it is | |
1b2b90cb | 348 | * defined and available, otherwise it returns NULL. |
4eafbd15 BZ |
349 | * |
350 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | |
351 | * protected pointer. The reason for the same is that the opp pointer which is | |
352 | * returned will remain valid for use with opp_get_{voltage, freq} only while | |
353 | * under the locked area. The pointer returned must be used prior to unlocking | |
354 | * with rcu_read_unlock() to maintain the integrity of the pointer. | |
355 | */ | |
356 | struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) | |
357 | { | |
358 | struct device_opp *dev_opp; | |
4eafbd15 BZ |
359 | |
360 | opp_rcu_lockdep_assert(); | |
361 | ||
362 | dev_opp = _find_device_opp(dev); | |
1b2b90cb VK |
363 | if (IS_ERR(dev_opp) || !dev_opp->suspend_opp || |
364 | !dev_opp->suspend_opp->available) | |
365 | return NULL; | |
4eafbd15 | 366 | |
1b2b90cb | 367 | return dev_opp->suspend_opp; |
4eafbd15 BZ |
368 | } |
369 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp); | |
370 | ||
e1f60b29 | 371 | /** |
5d4879cd | 372 | * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list |
e1f60b29 NM |
373 | * @dev: device for which we do this operation |
374 | * | |
984f16c8 | 375 | * Return: This function returns the number of available opps if there are any, |
e1f60b29 NM |
376 | * else returns 0 if none or the corresponding error value. |
377 | * | |
b4718c02 | 378 | * Locking: This function takes rcu_read_lock(). |
e1f60b29 | 379 | */ |
5d4879cd | 380 | int dev_pm_opp_get_opp_count(struct device *dev) |
e1f60b29 NM |
381 | { |
382 | struct device_opp *dev_opp; | |
47d43ba7 | 383 | struct dev_pm_opp *temp_opp; |
e1f60b29 NM |
384 | int count = 0; |
385 | ||
b4718c02 | 386 | rcu_read_lock(); |
b02ded24 | 387 | |
327854c8 | 388 | dev_opp = _find_device_opp(dev); |
e1f60b29 | 389 | if (IS_ERR(dev_opp)) { |
b4718c02 DT |
390 | count = PTR_ERR(dev_opp); |
391 | dev_err(dev, "%s: device OPP not found (%d)\n", | |
392 | __func__, count); | |
393 | goto out_unlock; | |
e1f60b29 NM |
394 | } |
395 | ||
396 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | |
397 | if (temp_opp->available) | |
398 | count++; | |
399 | } | |
400 | ||
b4718c02 DT |
401 | out_unlock: |
402 | rcu_read_unlock(); | |
e1f60b29 NM |
403 | return count; |
404 | } | |
5d4879cd | 405 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); |
e1f60b29 NM |
406 | |
407 | /** | |
5d4879cd | 408 | * dev_pm_opp_find_freq_exact() - search for an exact frequency |
e1f60b29 NM |
409 | * @dev: device for which we do this operation |
410 | * @freq: frequency to search for | |
7ae49618 | 411 | * @available: true/false - match for available opp |
e1f60b29 | 412 | * |
984f16c8 NM |
413 | * Return: Searches for exact match in the opp list and returns pointer to the |
414 | * matching opp if found, else returns ERR_PTR in case of error and should | |
415 | * be handled using IS_ERR. Error return values can be: | |
0779726c NM |
416 | * EINVAL: for bad pointer |
417 | * ERANGE: no match found for search | |
418 | * ENODEV: if device not found in list of registered devices | |
e1f60b29 NM |
419 | * |
420 | * Note: available is a modifier for the search. if available=true, then the | |
421 | * match is for exact matching frequency and is available in the stored OPP | |
422 | * table. if false, the match is for exact frequency which is not available. | |
423 | * | |
424 | * This provides a mechanism to enable an opp which is not available currently | |
425 | * or the opposite as well. | |
426 | * | |
427 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | |
428 | * protected pointer. The reason for the same is that the opp pointer which is | |
429 | * returned will remain valid for use with opp_get_{voltage, freq} only while | |
430 | * under the locked area. The pointer returned must be used prior to unlocking | |
431 | * with rcu_read_unlock() to maintain the integrity of the pointer. | |
432 | */ | |
47d43ba7 NM |
433 | struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, |
434 | unsigned long freq, | |
435 | bool available) | |
e1f60b29 NM |
436 | { |
437 | struct device_opp *dev_opp; | |
47d43ba7 | 438 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
e1f60b29 | 439 | |
b02ded24 DT |
440 | opp_rcu_lockdep_assert(); |
441 | ||
327854c8 | 442 | dev_opp = _find_device_opp(dev); |
e1f60b29 NM |
443 | if (IS_ERR(dev_opp)) { |
444 | int r = PTR_ERR(dev_opp); | |
445 | dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); | |
446 | return ERR_PTR(r); | |
447 | } | |
448 | ||
449 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | |
450 | if (temp_opp->available == available && | |
451 | temp_opp->rate == freq) { | |
452 | opp = temp_opp; | |
453 | break; | |
454 | } | |
455 | } | |
456 | ||
457 | return opp; | |
458 | } | |
5d4879cd | 459 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); |
e1f60b29 NM |
460 | |
461 | /** | |
5d4879cd | 462 | * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq |
e1f60b29 NM |
463 | * @dev: device for which we do this operation |
464 | * @freq: Start frequency | |
465 | * | |
466 | * Search for the matching ceil *available* OPP from a starting freq | |
467 | * for a device. | |
468 | * | |
984f16c8 | 469 | * Return: matching *opp and refreshes *freq accordingly, else returns |
0779726c NM |
470 | * ERR_PTR in case of error and should be handled using IS_ERR. Error return |
471 | * values can be: | |
472 | * EINVAL: for bad pointer | |
473 | * ERANGE: no match found for search | |
474 | * ENODEV: if device not found in list of registered devices | |
e1f60b29 NM |
475 | * |
476 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | |
477 | * protected pointer. The reason for the same is that the opp pointer which is | |
478 | * returned will remain valid for use with opp_get_{voltage, freq} only while | |
479 | * under the locked area. The pointer returned must be used prior to unlocking | |
480 | * with rcu_read_unlock() to maintain the integrity of the pointer. | |
481 | */ | |
47d43ba7 NM |
482 | struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, |
483 | unsigned long *freq) | |
e1f60b29 NM |
484 | { |
485 | struct device_opp *dev_opp; | |
47d43ba7 | 486 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
e1f60b29 | 487 | |
b02ded24 DT |
488 | opp_rcu_lockdep_assert(); |
489 | ||
e1f60b29 NM |
490 | if (!dev || !freq) { |
491 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | |
492 | return ERR_PTR(-EINVAL); | |
493 | } | |
494 | ||
327854c8 | 495 | dev_opp = _find_device_opp(dev); |
e1f60b29 | 496 | if (IS_ERR(dev_opp)) |
0779726c | 497 | return ERR_CAST(dev_opp); |
e1f60b29 NM |
498 | |
499 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | |
500 | if (temp_opp->available && temp_opp->rate >= *freq) { | |
501 | opp = temp_opp; | |
502 | *freq = opp->rate; | |
503 | break; | |
504 | } | |
505 | } | |
506 | ||
507 | return opp; | |
508 | } | |
5d4879cd | 509 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); |
e1f60b29 NM |
510 | |
511 | /** | |
5d4879cd | 512 | * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq |
e1f60b29 NM |
513 | * @dev: device for which we do this operation |
514 | * @freq: Start frequency | |
515 | * | |
516 | * Search for the matching floor *available* OPP from a starting freq | |
517 | * for a device. | |
518 | * | |
984f16c8 | 519 | * Return: matching *opp and refreshes *freq accordingly, else returns |
0779726c NM |
520 | * ERR_PTR in case of error and should be handled using IS_ERR. Error return |
521 | * values can be: | |
522 | * EINVAL: for bad pointer | |
523 | * ERANGE: no match found for search | |
524 | * ENODEV: if device not found in list of registered devices | |
e1f60b29 NM |
525 | * |
526 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | |
527 | * protected pointer. The reason for the same is that the opp pointer which is | |
528 | * returned will remain valid for use with opp_get_{voltage, freq} only while | |
529 | * under the locked area. The pointer returned must be used prior to unlocking | |
530 | * with rcu_read_unlock() to maintain the integrity of the pointer. | |
531 | */ | |
47d43ba7 NM |
532 | struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, |
533 | unsigned long *freq) | |
e1f60b29 NM |
534 | { |
535 | struct device_opp *dev_opp; | |
47d43ba7 | 536 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
e1f60b29 | 537 | |
b02ded24 DT |
538 | opp_rcu_lockdep_assert(); |
539 | ||
e1f60b29 NM |
540 | if (!dev || !freq) { |
541 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | |
542 | return ERR_PTR(-EINVAL); | |
543 | } | |
544 | ||
327854c8 | 545 | dev_opp = _find_device_opp(dev); |
e1f60b29 | 546 | if (IS_ERR(dev_opp)) |
0779726c | 547 | return ERR_CAST(dev_opp); |
e1f60b29 NM |
548 | |
549 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | |
550 | if (temp_opp->available) { | |
551 | /* go to the next node, before choosing prev */ | |
552 | if (temp_opp->rate > *freq) | |
553 | break; | |
554 | else | |
555 | opp = temp_opp; | |
556 | } | |
557 | } | |
558 | if (!IS_ERR(opp)) | |
559 | *freq = opp->rate; | |
560 | ||
561 | return opp; | |
562 | } | |
5d4879cd | 563 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); |
e1f60b29 | 564 | |
/* List-dev Helpers */
/*
 * SRCU callback for freeing a device_list_opp entry. The inner kfree_rcu()
 * chains an additional RCU grace period after the SRCU one, since readers of
 * these structures may be in either an rcu or srcu read-side section (see the
 * comment on struct device_opp).
 */
static void _kfree_list_dev_rcu(struct rcu_head *head)
{
	struct device_list_opp *list_dev;

	list_dev = container_of(head, struct device_list_opp, rcu_head);
	kfree_rcu(list_dev, rcu_head);
}

/*
 * Unlink @list_dev from @dev_opp's device list and schedule its deferred
 * free after the SRCU (and then RCU) grace periods. Caller must hold
 * dev_opp_list_lock.
 */
static void _remove_list_dev(struct device_list_opp *list_dev,
			     struct device_opp *dev_opp)
{
	list_del(&list_dev->node);
	call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
		  _kfree_list_dev_rcu);
}

582 | static struct device_list_opp *_add_list_dev(const struct device *dev, | |
583 | struct device_opp *dev_opp) | |
584 | { | |
585 | struct device_list_opp *list_dev; | |
586 | ||
587 | list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL); | |
588 | if (!list_dev) | |
589 | return NULL; | |
590 | ||
591 | /* Initialize list-dev */ | |
592 | list_dev->dev = dev; | |
593 | list_add_rcu(&list_dev->node, &dev_opp->dev_list); | |
594 | ||
595 | return list_dev; | |
596 | } | |
597 | ||
984f16c8 | 598 | /** |
aa5f2f85 | 599 | * _add_device_opp() - Find device OPP table or allocate a new one |
984f16c8 NM |
600 | * @dev: device for which we do this operation |
601 | * | |
aa5f2f85 VK |
602 | * It tries to find an existing table first, if it couldn't find one, it |
603 | * allocates a new OPP table and returns that. | |
984f16c8 NM |
604 | * |
605 | * Return: valid device_opp pointer if success, else NULL. | |
606 | */ | |
327854c8 | 607 | static struct device_opp *_add_device_opp(struct device *dev) |
07cce74a VK |
608 | { |
609 | struct device_opp *dev_opp; | |
06441658 | 610 | struct device_list_opp *list_dev; |
07cce74a | 611 | |
aa5f2f85 VK |
612 | /* Check for existing list for 'dev' first */ |
613 | dev_opp = _find_device_opp(dev); | |
614 | if (!IS_ERR(dev_opp)) | |
615 | return dev_opp; | |
07cce74a VK |
616 | |
617 | /* | |
618 | * Allocate a new device OPP table. In the infrequent case where a new | |
619 | * device is needed to be added, we pay this penalty. | |
620 | */ | |
621 | dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL); | |
622 | if (!dev_opp) | |
623 | return NULL; | |
624 | ||
06441658 VK |
625 | INIT_LIST_HEAD(&dev_opp->dev_list); |
626 | ||
627 | list_dev = _add_list_dev(dev, dev_opp); | |
628 | if (!list_dev) { | |
629 | kfree(dev_opp); | |
630 | return NULL; | |
631 | } | |
632 | ||
07cce74a VK |
633 | srcu_init_notifier_head(&dev_opp->srcu_head); |
634 | INIT_LIST_HEAD(&dev_opp->opp_list); | |
635 | ||
636 | /* Secure the device list modification */ | |
637 | list_add_rcu(&dev_opp->node, &dev_opp_list); | |
638 | return dev_opp; | |
639 | } | |
640 | ||
/**
 * _kfree_device_rcu() - Free device_opp RCU handler
 * @head: RCU head
 *
 * SRCU callback; the inner kfree_rcu() chains an additional RCU grace
 * period before the actual free (see the comment on struct device_opp).
 */
static void _kfree_device_rcu(struct rcu_head *head)
{
	struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);

	kfree_rcu(device_opp, rcu_head);
}

/**
 * _remove_device_opp() - Removes a device OPP table
 * @dev_opp: device OPP table to be removed.
 *
 * Removes/frees device OPP table it it doesn't contain any OPPs.
 * The last registered device entry is unlinked first; the table itself is
 * then freed after the SRCU and RCU grace periods have elapsed.
 * Caller must hold dev_opp_list_lock.
 */
static void _remove_device_opp(struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;

	/* Keep the table alive while it still holds OPPs. */
	if (!list_empty(&dev_opp->opp_list))
		return;

	list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
				    node);

	_remove_list_dev(list_dev, dev_opp);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&dev_opp->dev_list));

	list_del_rcu(&dev_opp->node);
	call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
		  _kfree_device_rcu);
}

/**
 * _kfree_opp_rcu() - Free OPP RCU handler
 * @head: RCU head
 *
 * SRCU callback; the inner kfree_rcu() chains an additional RCU grace
 * period before the actual free (see the comment on struct device_opp).
 */
static void _kfree_opp_rcu(struct rcu_head *head)
{
	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);

	kfree_rcu(opp, rcu_head);
}

/**
 * _opp_remove() - Remove an OPP from a table definition
 * @dev_opp:	points back to the device_opp struct this opp belongs to
 * @opp:	pointer to the OPP to remove
 * @notify:	OPP_EVENT_REMOVE notification should be sent or not
 *
 * This function removes an opp definition from the opp list. If the removed
 * OPP was the last one, the (now empty) device OPP table is removed as well.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * It is assumed that the caller holds required mutex for an RCU updater
 * strategy.
 */
static void _opp_remove(struct device_opp *dev_opp,
			struct dev_pm_opp *opp, bool notify)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	if (notify)
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
	list_del_rcu(&opp->node);
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Drop the table too if this was its last OPP. */
	_remove_device_opp(dev_opp);
}

716 | /** | |
717 | * dev_pm_opp_remove() - Remove an OPP from OPP list | |
718 | * @dev: device for which we do this operation | |
719 | * @freq: OPP to remove with matching 'freq' | |
720 | * | |
721 | * This function removes an opp from the opp list. | |
984f16c8 NM |
722 | * |
723 | * Locking: The internal device_opp and opp structures are RCU protected. | |
724 | * Hence this function internally uses RCU updater strategy with mutex locks | |
725 | * to keep the integrity of the internal data structures. Callers should ensure | |
726 | * that this function is *NOT* called under RCU protection or in contexts where | |
727 | * mutex cannot be locked. | |
129eec55 VK |
728 | */ |
729 | void dev_pm_opp_remove(struct device *dev, unsigned long freq) | |
730 | { | |
731 | struct dev_pm_opp *opp; | |
732 | struct device_opp *dev_opp; | |
733 | bool found = false; | |
734 | ||
735 | /* Hold our list modification lock here */ | |
736 | mutex_lock(&dev_opp_list_lock); | |
737 | ||
327854c8 | 738 | dev_opp = _find_device_opp(dev); |
129eec55 VK |
739 | if (IS_ERR(dev_opp)) |
740 | goto unlock; | |
741 | ||
742 | list_for_each_entry(opp, &dev_opp->opp_list, node) { | |
743 | if (opp->rate == freq) { | |
744 | found = true; | |
745 | break; | |
746 | } | |
747 | } | |
748 | ||
749 | if (!found) { | |
750 | dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", | |
751 | __func__, freq); | |
752 | goto unlock; | |
753 | } | |
754 | ||
23dacf6d | 755 | _opp_remove(dev_opp, opp, true); |
129eec55 VK |
756 | unlock: |
757 | mutex_unlock(&dev_opp_list_lock); | |
758 | } | |
759 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove); | |
760 | ||
23dacf6d VK |
761 | static struct dev_pm_opp *_allocate_opp(struct device *dev, |
762 | struct device_opp **dev_opp) | |
e1f60b29 | 763 | { |
23dacf6d | 764 | struct dev_pm_opp *opp; |
e1f60b29 | 765 | |
23dacf6d VK |
766 | /* allocate new OPP node */ |
767 | opp = kzalloc(sizeof(*opp), GFP_KERNEL); | |
768 | if (!opp) | |
769 | return NULL; | |
e1f60b29 | 770 | |
23dacf6d | 771 | INIT_LIST_HEAD(&opp->node); |
e1f60b29 | 772 | |
23dacf6d VK |
773 | *dev_opp = _add_device_opp(dev); |
774 | if (!*dev_opp) { | |
775 | kfree(opp); | |
776 | return NULL; | |
777 | } | |
778 | ||
779 | return opp; | |
780 | } | |
781 | ||
06441658 VK |
/*
 * _opp_add() - Insert a fully-populated OPP into a device's OPP list
 * @dev:	device, used only for the duplicate warning
 * @new_opp:	OPP with rate/voltage/availability already filled in
 * @dev_opp:	per-device OPP table owning the list
 *
 * The list is kept sorted by increasing frequency. Callers must hold
 * dev_opp_list_lock; readers traverse the list under RCU, hence the
 * list_add_rcu() publication below.
 *
 * Return: 0 on success (including a benign duplicate: same freq, same volt,
 * existing entry available), -EEXIST for a conflicting duplicate.
 */
static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
		    struct device_opp *dev_opp)
{
	struct dev_pm_opp *opp;
	struct list_head *head = &dev_opp->opp_list;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &dev_opp->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			/* Remember the last node with a smaller rate */
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->u_volt, opp->available,
			 new_opp->rate, new_opp->u_volt, new_opp->available);

		/* Identical and usable entry already present: treat as success */
		return opp->available && new_opp->u_volt == opp->u_volt ?
			0 : -EEXIST;
	}

	new_opp->dev_opp = dev_opp;
	/* Publish after the predecessor so RCU readers see a sorted list */
	list_add_rcu(&new_opp->node, head);

	return 0;
}
819 | ||
984f16c8 NM |
/**
 * _opp_add_dynamic() - Allocate a dynamic OPP.
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 * @dynamic:	Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and
 * freed by of_free_opp_table.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
static int _opp_add_dynamic(struct device *dev, unsigned long freq,
			    long u_volt, bool dynamic)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp;
	int ret;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Allocates the OPP and (if needed) the per-device table */
	new_opp = _allocate_opp(dev, &dev_opp);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* populate the opp table */
	new_opp->rate = freq;
	new_opp->u_volt = u_volt;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, dev_opp);
	if (ret)
		goto free_opp;

	mutex_unlock(&dev_opp_list_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 * Deliberately called after dropping the mutex: notifier callbacks
	 * may block or re-enter the OPP API.
	 */
	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	/* Not yet visible to readers, so a synchronous (non-RCU) free is fine */
	_opp_remove(dev_opp, new_opp, false);
unlock:
	mutex_unlock(&dev_opp_list_lock);
	return ret;
}
38393409 | 888 | |
27465902 VK |
889 | /* TODO: Support multiple regulators */ |
890 | static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev) | |
891 | { | |
892 | u32 microvolt[3] = {0}; | |
893 | int count, ret; | |
894 | ||
895 | count = of_property_count_u32_elems(opp->np, "opp-microvolt"); | |
896 | if (!count) | |
897 | return 0; | |
898 | ||
899 | /* There can be one or three elements here */ | |
900 | if (count != 1 && count != 3) { | |
901 | dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", | |
902 | __func__, count); | |
903 | return -EINVAL; | |
904 | } | |
905 | ||
906 | ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt, | |
907 | count); | |
908 | if (ret) { | |
909 | dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__, | |
910 | ret); | |
911 | return -EINVAL; | |
912 | } | |
913 | ||
914 | opp->u_volt = microvolt[0]; | |
915 | opp->u_volt_min = microvolt[1]; | |
916 | opp->u_volt_max = microvolt[2]; | |
917 | ||
918 | return 0; | |
919 | } | |
920 | ||
/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @dev:	device for which we do this operation
 * @np:		device node
 *
 * This function adds an opp definition to the opp list and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -EINVAL	Failed parsing the OPP node
 */
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp;
	u64 rate;
	u32 val;
	int ret;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	new_opp = _allocate_opp(dev, &dev_opp);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* "opp-hz" is the only mandatory property of an OPP node */
	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (ret < 0) {
		dev_err(dev, "%s: opp-hz not found\n", __func__);
		goto free_opp;
	}

	/*
	 * Rate is defined as an unsigned long in clk API, and so casting
	 * explicitly to its type. Must be fixed once rate is 64 bit
	 * guaranteed in clk API.
	 */
	new_opp->rate = (unsigned long)rate;
	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	new_opp->np = np;
	new_opp->dynamic = false;
	new_opp->available = true;

	/* Optional per-OPP transition latency */
	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	ret = opp_get_microvolt(new_opp, dev);
	if (ret)
		goto free_opp;

	/* Optional current in uA */
	if (!of_property_read_u32(new_opp->np, "opp-microamp", &val))
		new_opp->u_amp = val;

	ret = _opp_add(dev, new_opp, dev_opp);
	if (ret)
		goto free_opp;

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (dev_opp->suspend_opp)
			/* Only the first "opp-suspend" node is honoured */
			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
				 __func__, dev_opp->suspend_opp->rate,
				 new_opp->rate);
		else
			dev_opp->suspend_opp = new_opp;
	}

	/* Track the worst-case transition latency across the whole table */
	if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
		dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;

	mutex_unlock(&dev_opp_list_lock);

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
		 new_opp->u_volt_min, new_opp->u_volt_max,
		 new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list. Called after the mutex is dropped, as the
	 * callbacks may block.
	 */
	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	/* OPP never became visible to readers; free it synchronously */
	_opp_remove(dev_opp, new_opp, false);
unlock:
	mutex_unlock(&dev_opp_list_lock);
	return ret;
}
1026 | ||
38393409 VK |
1027 | /** |
1028 | * dev_pm_opp_add() - Add an OPP table from a table definitions | |
1029 | * @dev: device for which we do this operation | |
1030 | * @freq: Frequency in Hz for this OPP | |
1031 | * @u_volt: Voltage in uVolts for this OPP | |
1032 | * | |
1033 | * This function adds an opp definition to the opp list and returns status. | |
1034 | * The opp is made available by default and it can be controlled using | |
1035 | * dev_pm_opp_enable/disable functions. | |
1036 | * | |
1037 | * Locking: The internal device_opp and opp structures are RCU protected. | |
1038 | * Hence this function internally uses RCU updater strategy with mutex locks | |
1039 | * to keep the integrity of the internal data structures. Callers should ensure | |
1040 | * that this function is *NOT* called under RCU protection or in contexts where | |
1041 | * mutex cannot be locked. | |
1042 | * | |
1043 | * Return: | |
984f16c8 | 1044 | * 0 On success OR |
38393409 | 1045 | * Duplicate OPPs (both freq and volt are same) and opp->available |
984f16c8 | 1046 | * -EEXIST Freq are same and volt are different OR |
38393409 | 1047 | * Duplicate OPPs (both freq and volt are same) and !opp->available |
984f16c8 | 1048 | * -ENOMEM Memory allocation failure |
38393409 VK |
1049 | */ |
1050 | int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | |
1051 | { | |
327854c8 | 1052 | return _opp_add_dynamic(dev, freq, u_volt, true); |
38393409 | 1053 | } |
5d4879cd | 1054 | EXPORT_SYMBOL_GPL(dev_pm_opp_add); |
e1f60b29 NM |
1055 | |
/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev: device for which we do this operation
 * @freq: OPP frequency to modify availability
 * @availability_req: availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated */
	/* Allocated up front so the locked section cannot fail on -ENOMEM */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	/*
	 * RCU copy-update: publish the modified copy, then defer freeing the
	 * old node until all SRCU readers of this table are done with it.
	 */
	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
					 new_opp);
	else
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
					 new_opp);

	return 0;

unlock:
	mutex_unlock(&dev_opp_list_lock);
	/* Early exit: the preallocated copy was never used */
	kfree(new_opp);
	return r;
}
1137 | ||
1138 | /** | |
5d4879cd | 1139 | * dev_pm_opp_enable() - Enable a specific OPP |
e1f60b29 NM |
1140 | * @dev: device for which we do this operation |
1141 | * @freq: OPP frequency to enable | |
1142 | * | |
1143 | * Enables a provided opp. If the operation is valid, this returns 0, else the | |
1144 | * corresponding error value. It is meant to be used for users an OPP available | |
5d4879cd | 1145 | * after being temporarily made unavailable with dev_pm_opp_disable. |
e1f60b29 NM |
1146 | * |
1147 | * Locking: The internal device_opp and opp structures are RCU protected. | |
1148 | * Hence this function indirectly uses RCU and mutex locks to keep the | |
1149 | * integrity of the internal data structures. Callers should ensure that | |
1150 | * this function is *NOT* called under RCU protection or in contexts where | |
1151 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | |
984f16c8 NM |
1152 | * |
1153 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | |
1154 | * copy operation, returns 0 if no modifcation was done OR modification was | |
1155 | * successful. | |
e1f60b29 | 1156 | */ |
5d4879cd | 1157 | int dev_pm_opp_enable(struct device *dev, unsigned long freq) |
e1f60b29 | 1158 | { |
327854c8 | 1159 | return _opp_set_availability(dev, freq, true); |
e1f60b29 | 1160 | } |
5d4879cd | 1161 | EXPORT_SYMBOL_GPL(dev_pm_opp_enable); |
e1f60b29 NM |
1162 | |
1163 | /** | |
5d4879cd | 1164 | * dev_pm_opp_disable() - Disable a specific OPP |
e1f60b29 NM |
1165 | * @dev: device for which we do this operation |
1166 | * @freq: OPP frequency to disable | |
1167 | * | |
1168 | * Disables a provided opp. If the operation is valid, this returns | |
1169 | * 0, else the corresponding error value. It is meant to be a temporary | |
1170 | * control by users to make this OPP not available until the circumstances are | |
5d4879cd | 1171 | * right to make it available again (with a call to dev_pm_opp_enable). |
e1f60b29 NM |
1172 | * |
1173 | * Locking: The internal device_opp and opp structures are RCU protected. | |
1174 | * Hence this function indirectly uses RCU and mutex locks to keep the | |
1175 | * integrity of the internal data structures. Callers should ensure that | |
1176 | * this function is *NOT* called under RCU protection or in contexts where | |
1177 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | |
984f16c8 NM |
1178 | * |
1179 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | |
1180 | * copy operation, returns 0 if no modifcation was done OR modification was | |
1181 | * successful. | |
e1f60b29 | 1182 | */ |
5d4879cd | 1183 | int dev_pm_opp_disable(struct device *dev, unsigned long freq) |
e1f60b29 | 1184 | { |
327854c8 | 1185 | return _opp_set_availability(dev, freq, false); |
e1f60b29 | 1186 | } |
5d4879cd | 1187 | EXPORT_SYMBOL_GPL(dev_pm_opp_disable); |
e1f60b29 | 1188 | |
03ca370f | 1189 | /** |
5d4879cd | 1190 | * dev_pm_opp_get_notifier() - find notifier_head of the device with opp |
03ca370f | 1191 | * @dev: device pointer used to lookup device OPPs. |
984f16c8 NM |
1192 | * |
1193 | * Return: pointer to notifier head if found, otherwise -ENODEV or | |
1194 | * -EINVAL based on type of error casted as pointer. value must be checked | |
1195 | * with IS_ERR to determine valid pointer or error result. | |
1196 | * | |
1197 | * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU | |
1198 | * protected pointer. The reason for the same is that the opp pointer which is | |
1199 | * returned will remain valid for use with opp_get_{voltage, freq} only while | |
1200 | * under the locked area. The pointer returned must be used prior to unlocking | |
1201 | * with rcu_read_unlock() to maintain the integrity of the pointer. | |
03ca370f | 1202 | */ |
5d4879cd | 1203 | struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev) |
03ca370f | 1204 | { |
327854c8 | 1205 | struct device_opp *dev_opp = _find_device_opp(dev); |
03ca370f MH |
1206 | |
1207 | if (IS_ERR(dev_opp)) | |
156acb16 | 1208 | return ERR_CAST(dev_opp); /* matching type */ |
03ca370f | 1209 | |
cd1a068a | 1210 | return &dev_opp->srcu_head; |
03ca370f | 1211 | } |
4679ec37 | 1212 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier); |
b496dfbc SG |
1213 | |
1214 | #ifdef CONFIG_OF | |
/**
 * of_free_opp_table() - Free OPP table entries created from static DT entries
 * @dev: device pointer used to lookup device OPPs.
 *
 * Free OPPs created using static entries present in DT.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void of_free_opp_table(struct device *dev)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *opp, *tmp;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Check for existing list for 'dev' */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int error = PTR_ERR(dev_opp);

		/* -ENODEV just means no table was ever created: not a bug */
		if (error != -ENODEV)
			WARN(1, "%s: dev_opp: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
		goto unlock;
	}

	/* Find if dev_opp manages a single device */
	if (list_is_singular(&dev_opp->dev_list)) {
		/* Free static OPPs */
		/* Dynamic OPPs remain; they are owned by dev_pm_opp_remove() */
		list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
			if (!opp->dynamic)
				_opp_remove(dev_opp, opp, true);
		}
	} else {
		/* Table is shared with other devices: only detach this one */
		_remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
	}

unlock:
	mutex_unlock(&dev_opp_list_lock);
}
EXPORT_SYMBOL_GPL(of_free_opp_table);
1263 | ||
8d4d4e98 VK |
1264 | void of_cpumask_free_opp_table(cpumask_var_t cpumask) |
1265 | { | |
1266 | struct device *cpu_dev; | |
1267 | int cpu; | |
1268 | ||
1269 | WARN_ON(cpumask_empty(cpumask)); | |
1270 | ||
1271 | for_each_cpu(cpu, cpumask) { | |
1272 | cpu_dev = get_cpu_device(cpu); | |
1273 | if (!cpu_dev) { | |
1274 | pr_err("%s: failed to get cpu%d device\n", __func__, | |
1275 | cpu); | |
1276 | continue; | |
1277 | } | |
1278 | ||
1279 | of_free_opp_table(cpu_dev); | |
1280 | } | |
1281 | } | |
1282 | EXPORT_SYMBOL_GPL(of_cpumask_free_opp_table); | |
1283 | ||
27465902 VK |
1284 | /* Returns opp descriptor node from its phandle. Caller must do of_node_put() */ |
1285 | static struct device_node * | |
1286 | _of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop) | |
1287 | { | |
1288 | struct device_node *opp_np; | |
1289 | ||
1290 | opp_np = of_find_node_by_phandle(be32_to_cpup(prop->value)); | |
1291 | if (!opp_np) { | |
1292 | dev_err(dev, "%s: Prop: %s contains invalid opp desc phandle\n", | |
1293 | __func__, prop->name); | |
1294 | return ERR_PTR(-EINVAL); | |
1295 | } | |
1296 | ||
1297 | return opp_np; | |
1298 | } | |
1299 | ||
8d4d4e98 VK |
1300 | /* Returns opp descriptor node for a device. Caller must do of_node_put() */ |
1301 | static struct device_node *_of_get_opp_desc_node(struct device *dev) | |
1302 | { | |
1303 | const struct property *prop; | |
1304 | ||
1305 | prop = of_find_property(dev->of_node, "operating-points-v2", NULL); | |
1306 | if (!prop) | |
1307 | return ERR_PTR(-ENODEV); | |
1308 | if (!prop->value) | |
1309 | return ERR_PTR(-ENODATA); | |
1310 | ||
1311 | /* | |
1312 | * TODO: Support for multiple OPP tables. | |
1313 | * | |
1314 | * There should be only ONE phandle present in "operating-points-v2" | |
1315 | * property. | |
1316 | */ | |
1317 | if (prop->length != sizeof(__be32)) { | |
1318 | dev_err(dev, "%s: Invalid opp desc phandle\n", __func__); | |
1319 | return ERR_PTR(-EINVAL); | |
1320 | } | |
1321 | ||
1322 | return _of_get_opp_desc_node_from_prop(dev, prop); | |
1323 | } | |
1324 | ||
27465902 VK |
/* Initializes OPP tables based on new bindings */
/*
 * Parse the "operating-points-v2" descriptor referenced by @prop and register
 * every available OPP child node for @dev. If the descriptor is already
 * managed (shared table created by another device), this device is simply
 * attached to the existing table.
 *
 * Error handling distinguishes two unwind points: free_table (some OPPs were
 * added and must be torn down via of_free_opp_table()) and put_opp_np (only
 * the node reference needs dropping).
 */
static int _of_init_opp_table_v2(struct device *dev,
				 const struct property *prop)
{
	struct device_node *opp_np, *np;
	struct device_opp *dev_opp;
	int ret = 0, count = 0;

	if (!prop->value)
		return -ENODATA;

	/* Get opp node */
	opp_np = _of_get_opp_desc_node_from_prop(dev, prop);
	if (IS_ERR(opp_np))
		return PTR_ERR(opp_np);

	dev_opp = _managed_opp(opp_np);
	if (dev_opp) {
		/* OPPs are already managed */
		if (!_add_list_dev(dev, dev_opp))
			ret = -ENOMEM;
		goto put_opp_np;
	}

	/* We have opp-list node now, iterate over it and add OPPs */
	for_each_available_child_of_node(opp_np, np) {
		count++;

		ret = _opp_add_static_v2(dev, np);
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
				ret);
			goto free_table;
		}
	}

	/* There should be one of more OPP defined */
	if (WARN_ON(!count)) {
		ret = -ENOENT;
		goto put_opp_np;
	}

	/* The table was created implicitly by the first _opp_add_static_v2() */
	dev_opp = _find_device_opp(dev);
	if (WARN_ON(IS_ERR(dev_opp))) {
		ret = PTR_ERR(dev_opp);
		goto free_table;
	}

	/* Remember the descriptor so other devices can share this table */
	dev_opp->np = opp_np;
	dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");

	of_node_put(opp_np);
	return 0;

free_table:
	of_free_opp_table(dev);
put_opp_np:
	of_node_put(opp_np);

	return ret;
}
1386 | ||
1387 | /* Initializes OPP tables based on old-deprecated bindings */ | |
1388 | static int _of_init_opp_table_v1(struct device *dev) | |
b496dfbc SG |
1389 | { |
1390 | const struct property *prop; | |
1391 | const __be32 *val; | |
1392 | int nr; | |
1393 | ||
1394 | prop = of_find_property(dev->of_node, "operating-points", NULL); | |
1395 | if (!prop) | |
1396 | return -ENODEV; | |
1397 | if (!prop->value) | |
1398 | return -ENODATA; | |
1399 | ||
1400 | /* | |
1401 | * Each OPP is a set of tuples consisting of frequency and | |
1402 | * voltage like <freq-kHz vol-uV>. | |
1403 | */ | |
1404 | nr = prop->length / sizeof(u32); | |
1405 | if (nr % 2) { | |
1406 | dev_err(dev, "%s: Invalid OPP list\n", __func__); | |
1407 | return -EINVAL; | |
1408 | } | |
1409 | ||
1410 | val = prop->value; | |
1411 | while (nr) { | |
1412 | unsigned long freq = be32_to_cpup(val++) * 1000; | |
1413 | unsigned long volt = be32_to_cpup(val++); | |
1414 | ||
327854c8 | 1415 | if (_opp_add_dynamic(dev, freq, volt, false)) |
b496dfbc SG |
1416 | dev_warn(dev, "%s: Failed to add OPP %ld\n", |
1417 | __func__, freq); | |
b496dfbc SG |
1418 | nr -= 2; |
1419 | } | |
1420 | ||
1421 | return 0; | |
1422 | } | |
129eec55 VK |
1423 | |
1424 | /** | |
27465902 | 1425 | * of_init_opp_table() - Initialize opp table from device tree |
129eec55 VK |
1426 | * @dev: device pointer used to lookup device OPPs. |
1427 | * | |
27465902 | 1428 | * Register the initial OPP table with the OPP library for given device. |
984f16c8 NM |
1429 | * |
1430 | * Locking: The internal device_opp and opp structures are RCU protected. | |
1431 | * Hence this function indirectly uses RCU updater strategy with mutex locks | |
1432 | * to keep the integrity of the internal data structures. Callers should ensure | |
1433 | * that this function is *NOT* called under RCU protection or in contexts where | |
1434 | * mutex cannot be locked. | |
27465902 VK |
1435 | * |
1436 | * Return: | |
1437 | * 0 On success OR | |
1438 | * Duplicate OPPs (both freq and volt are same) and opp->available | |
1439 | * -EEXIST Freq are same and volt are different OR | |
1440 | * Duplicate OPPs (both freq and volt are same) and !opp->available | |
1441 | * -ENOMEM Memory allocation failure | |
1442 | * -ENODEV when 'operating-points' property is not found or is invalid data | |
1443 | * in device node. | |
1444 | * -ENODATA when empty 'operating-points' property is found | |
1445 | * -EINVAL when invalid entries are found in opp-v2 table | |
129eec55 | 1446 | */ |
27465902 | 1447 | int of_init_opp_table(struct device *dev) |
129eec55 | 1448 | { |
27465902 VK |
1449 | const struct property *prop; |
1450 | ||
1451 | /* | |
1452 | * OPPs have two version of bindings now. The older one is deprecated, | |
1453 | * try for the new binding first. | |
1454 | */ | |
1455 | prop = of_find_property(dev->of_node, "operating-points-v2", NULL); | |
1456 | if (!prop) { | |
1457 | /* | |
1458 | * Try old-deprecated bindings for backward compatibility with | |
1459 | * older dtbs. | |
1460 | */ | |
1461 | return _of_init_opp_table_v1(dev); | |
1462 | } | |
1463 | ||
1464 | return _of_init_opp_table_v2(dev, prop); | |
1465 | } | |
74c46c6e | 1466 | EXPORT_SYMBOL_GPL(of_init_opp_table); |
8d4d4e98 VK |
1467 | |
1468 | int of_cpumask_init_opp_table(cpumask_var_t cpumask) | |
1469 | { | |
1470 | struct device *cpu_dev; | |
1471 | int cpu, ret = 0; | |
1472 | ||
1473 | WARN_ON(cpumask_empty(cpumask)); | |
1474 | ||
1475 | for_each_cpu(cpu, cpumask) { | |
1476 | cpu_dev = get_cpu_device(cpu); | |
1477 | if (!cpu_dev) { | |
1478 | pr_err("%s: failed to get cpu%d device\n", __func__, | |
1479 | cpu); | |
1480 | continue; | |
1481 | } | |
1482 | ||
1483 | ret = of_init_opp_table(cpu_dev); | |
1484 | if (ret) { | |
1485 | pr_err("%s: couldn't find opp table for cpu:%d, %d\n", | |
1486 | __func__, cpu, ret); | |
1487 | ||
1488 | /* Free all other OPPs */ | |
1489 | of_cpumask_free_opp_table(cpumask); | |
1490 | break; | |
1491 | } | |
1492 | } | |
1493 | ||
1494 | return ret; | |
1495 | } | |
1496 | EXPORT_SYMBOL_GPL(of_cpumask_init_opp_table); | |
1497 | ||
1498 | /* Required only for V1 bindings, as v2 can manage it from DT itself */ | |
1499 | int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) | |
1500 | { | |
1501 | struct device_list_opp *list_dev; | |
2a6127d0 | 1502 | struct device_opp *dev_opp; |
8d4d4e98 VK |
1503 | struct device *dev; |
1504 | int cpu, ret = 0; | |
129eec55 | 1505 | |
8d4d4e98 VK |
1506 | rcu_read_lock(); |
1507 | ||
1508 | dev_opp = _find_device_opp(cpu_dev); | |
0fe30da2 | 1509 | if (IS_ERR(dev_opp)) { |
8d4d4e98 VK |
1510 | ret = -EINVAL; |
1511 | goto out_rcu_read_unlock; | |
0fe30da2 | 1512 | } |
129eec55 | 1513 | |
8d4d4e98 VK |
1514 | for_each_cpu(cpu, cpumask) { |
1515 | if (cpu == cpu_dev->id) | |
1516 | continue; | |
129eec55 | 1517 | |
8d4d4e98 VK |
1518 | dev = get_cpu_device(cpu); |
1519 | if (!dev) { | |
1520 | dev_err(cpu_dev, "%s: failed to get cpu%d device\n", | |
1521 | __func__, cpu); | |
1522 | continue; | |
1523 | } | |
1524 | ||
1525 | list_dev = _add_list_dev(dev, dev_opp); | |
1526 | if (!list_dev) { | |
1527 | dev_err(dev, "%s: failed to add list-dev for cpu%d device\n", | |
1528 | __func__, cpu); | |
1529 | continue; | |
1530 | } | |
129eec55 | 1531 | } |
8d4d4e98 VK |
1532 | out_rcu_read_unlock: |
1533 | rcu_read_unlock(); | |
129eec55 | 1534 | |
8d4d4e98 | 1535 | return 0; |
129eec55 | 1536 | } |
8d4d4e98 VK |
1537 | EXPORT_SYMBOL_GPL(set_cpus_sharing_opps); |
1538 | ||
/*
 * Works only for OPP v2 bindings.
 *
 * cpumask should be already set to mask of cpu_dev->id.
 * Returns -ENOENT if operating-points-v2 bindings aren't supported.
 *
 * Marks in @cpumask every possible CPU whose "operating-points-v2" phandle
 * resolves to the same descriptor node as @cpu_dev's, i.e. CPUs that share
 * this OPP table ("opp-shared"). Node references taken while comparing are
 * dropped before returning.
 */
int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
{
	struct device_node *np, *tmp_np;
	struct device *tcpu_dev;
	int cpu, ret = 0;

	/* Get OPP descriptor node */
	np = _of_get_opp_desc_node(cpu_dev);
	if (IS_ERR(np)) {
		dev_dbg(cpu_dev, "%s: Couldn't find opp node: %ld\n", __func__,
			PTR_ERR(np));
		return -ENOENT;
	}

	/* OPPs are shared ? */
	/* Without "opp-shared" the table is per-CPU: nothing more to mark */
	if (!of_property_read_bool(np, "opp-shared"))
		goto put_cpu_node;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		tcpu_dev = get_cpu_device(cpu);
		if (!tcpu_dev) {
			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
				__func__, cpu);
			ret = -ENODEV;
			goto put_cpu_node;
		}

		/* Get OPP descriptor node */
		tmp_np = _of_get_opp_desc_node(tcpu_dev);
		if (IS_ERR(tmp_np)) {
			dev_err(tcpu_dev, "%s: Couldn't find opp node: %ld\n",
				__func__, PTR_ERR(tmp_np));
			ret = PTR_ERR(tmp_np);
			goto put_cpu_node;
		}

		/* CPUs are sharing opp node */
		if (np == tmp_np)
			cpumask_set_cpu(cpu, cpumask);

		/* Balance the reference taken by _of_get_opp_desc_node() */
		of_node_put(tmp_np);
	}

put_cpu_node:
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL_GPL(of_get_cpus_sharing_opps);
b496dfbc | 1596 | #endif |