PM / OPP: Disable OPPs that aren't supported by the regulator
[deliverable/linux.git] drivers/base/power/opp/core.c
1/*
2 * Generic OPP Interface
3 *
4 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
5 * Nishanth Menon
6 * Romit Dasgupta
7 * Kevin Hilman
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/errno.h>
17#include <linux/err.h>
e1f60b29 18#include <linux/slab.h>
51990e82 19#include <linux/device.h>
b496dfbc 20#include <linux/of.h>
80126ce7 21#include <linux/export.h>
9f8ea969 22#include <linux/regulator/consumer.h>
e1f60b29 23
f59d3ee8 24#include "opp.h"
25
26/*
27 * The root of the list of all devices. All device_opp structures branch off
28 * from here, with each device_opp containing the list of opp it supports in
29 * various states of availability.
30 */
31static LIST_HEAD(dev_opp_list);
32/* Lock to allow exclusive modification to the device and opp lists */
87b4115d 33DEFINE_MUTEX(dev_opp_list_lock);
e1f60b29 34
35#define opp_rcu_lockdep_assert() \
36do { \
37 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
38 !lockdep_is_held(&dev_opp_list_lock), \
39 "Missing rcu_read_lock() or " \
40 "dev_opp_list_lock protection"); \
41} while (0)
42
43static struct device_list_opp *_find_list_dev(const struct device *dev,
44 struct device_opp *dev_opp)
45{
46 struct device_list_opp *list_dev;
47
48 list_for_each_entry(list_dev, &dev_opp->dev_list, node)
49 if (list_dev->dev == dev)
50 return list_dev;
51
52 return NULL;
53}
54
55static struct device_opp *_managed_opp(const struct device_node *np)
56{
57 struct device_opp *dev_opp;
58
59 list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
60 if (dev_opp->np == np) {
61 /*
62 * Multiple devices can point to the same OPP table and
63 * so will have same node-pointer, np.
64 *
65 * But the OPPs will be considered as shared only if the
66 * OPP table contains a "opp-shared" property.
67 */
68 return dev_opp->shared_opp ? dev_opp : NULL;
69 }
70 }
71
72 return NULL;
73}
74
e1f60b29 75/**
327854c8 76 * _find_device_opp() - find device_opp struct using device pointer
77 * @dev: device pointer used to lookup device OPPs
78 *
79 * Search list of device OPPs for one containing matching device. Does a RCU
80 * reader operation to grab the pointer needed.
81 *
984f16c8 82 * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
83 * -EINVAL based on type of error.
84 *
85 * Locking: For readers, this function must be called under rcu_read_lock().
86 * device_opp is a RCU protected pointer, which means that device_opp is valid
87 * as long as we are under RCU lock.
88 *
89 * For Writers, this function must be called with dev_opp_list_lock held.
e1f60b29 90 */
f59d3ee8 91struct device_opp *_find_device_opp(struct device *dev)
e1f60b29 92{
06441658 93 struct device_opp *dev_opp;
e1f60b29 94
95 opp_rcu_lockdep_assert();
96
50a3cb04 97 if (IS_ERR_OR_NULL(dev)) {
98 pr_err("%s: Invalid parameters\n", __func__);
99 return ERR_PTR(-EINVAL);
100 }
101
102 list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
103 if (_find_list_dev(dev, dev_opp))
104 return dev_opp;
e1f60b29 105
06441658 106 return ERR_PTR(-ENODEV);
107}
108
109/**
d6d00742 110 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
111 * @opp: opp for which the voltage has to be returned
112 *
984f16c8 113 * Return: voltage in micro volt corresponding to the opp, else
114 * return 0
115 *
116 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
117 * protected pointer. This means that opp which could have been fetched by
118 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
119 * under RCU lock. The pointer returned by the opp_find_freq family must be
120 * used in the same section as the usage of this function with the pointer
121 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
122 * pointer.
123 */
47d43ba7 124unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
e1f60b29 125{
47d43ba7 126 struct dev_pm_opp *tmp_opp;
127 unsigned long v = 0;
128
129 opp_rcu_lockdep_assert();
130
e1f60b29 131 tmp_opp = rcu_dereference(opp);
d6d00742 132 if (IS_ERR_OR_NULL(tmp_opp))
133 pr_err("%s: Invalid parameters\n", __func__);
134 else
135 v = tmp_opp->u_volt;
136
137 return v;
138}
5d4879cd 139EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
140
141/**
5d4879cd 142 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
143 * @opp: opp for which the frequency has to be returned
144 *
984f16c8 145 * Return: frequency in hertz corresponding to the opp, else
146 * return 0
147 *
148 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
149 * protected pointer. This means that opp which could have been fetched by
150 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
151 * under RCU lock. The pointer returned by the opp_find_freq family must be
152 * used in the same section as the usage of this function with the pointer
153 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
154 * pointer.
155 */
47d43ba7 156unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
e1f60b29 157{
47d43ba7 158 struct dev_pm_opp *tmp_opp;
159 unsigned long f = 0;
160
161 opp_rcu_lockdep_assert();
162
e1f60b29 163 tmp_opp = rcu_dereference(opp);
50a3cb04 164 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
165 pr_err("%s: Invalid parameters\n", __func__);
166 else
167 f = tmp_opp->rate;
168
169 return f;
170}
5d4879cd 171EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
e1f60b29 172
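/*
 * Illustrative sketch (not part of this file): the RCU rules documented
 * above mean that the OPP lookup and the dev_pm_opp_get_voltage()/_get_freq()
 * calls must sit inside one rcu_read_lock() section. Assuming a driver with
 * a valid 'struct device *dev' and a target rate 'freq' (both hypothetical),
 * a typical reader looks roughly like:
 *
 *	struct dev_pm_opp *opp;
 *	unsigned long volt = 0;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
 *	if (!IS_ERR(opp))
 *		volt = dev_pm_opp_get_voltage(opp);
 *	rcu_read_unlock();
 *
 * The opp pointer must not be used after rcu_read_unlock().
 */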
173/**
174 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
175 * @opp: opp for which turbo mode is being verified
176 *
177 * Turbo OPPs are not for normal use, and can be enabled (under certain
178 * conditions) for short durations to finish high-throughput work
179 * quickly. Running on them for longer times may overheat the chip.
180 *
181 * Return: true if opp is turbo opp, else false.
182 *
183 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
184 * protected pointer. This means that opp which could have been fetched by
185 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
186 * under RCU lock. The pointer returned by the opp_find_freq family must be
187 * used in the same section as the usage of this function with the pointer
188 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
189 * pointer.
190 */
191bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
192{
193 struct dev_pm_opp *tmp_opp;
194
195 opp_rcu_lockdep_assert();
196
197 tmp_opp = rcu_dereference(opp);
198 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
199 pr_err("%s: Invalid parameters\n", __func__);
200 return false;
201 }
202
203 return tmp_opp->turbo;
204}
205EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
206
207/**
208 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
209 * @dev: device for which we do this operation
210 *
211 * Return: This function returns the max clock latency in nanoseconds.
212 *
213 * Locking: This function takes rcu_read_lock().
214 */
215unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
216{
217 struct device_opp *dev_opp;
218 unsigned long clock_latency_ns;
219
220 rcu_read_lock();
221
222 dev_opp = _find_device_opp(dev);
223 if (IS_ERR(dev_opp))
224 clock_latency_ns = 0;
225 else
226 clock_latency_ns = dev_opp->clock_latency_ns_max;
227
228 rcu_read_unlock();
229 return clock_latency_ns;
230}
231EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
232
233/**
234 * dev_pm_opp_get_suspend_opp() - Get suspend opp
235 * @dev: device for which we do this operation
236 *
237 * Return: This function returns pointer to the suspend opp if it is
1b2b90cb 238 * defined and available, otherwise it returns NULL.
239 *
240 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
241 * protected pointer. The reason for the same is that the opp pointer which is
242 * returned will remain valid for use with opp_get_{voltage, freq} only while
243 * under the locked area. The pointer returned must be used prior to unlocking
244 * with rcu_read_unlock() to maintain the integrity of the pointer.
245 */
246struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
247{
248 struct device_opp *dev_opp;
249
250 opp_rcu_lockdep_assert();
251
252 dev_opp = _find_device_opp(dev);
253 if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
254 !dev_opp->suspend_opp->available)
255 return NULL;
4eafbd15 256
1b2b90cb 257 return dev_opp->suspend_opp;
258}
259EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
260
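/*
 * Illustrative sketch (not part of this file): a driver that wants to park
 * its clock on the "opp-suspend" OPP before system suspend could do
 * something like the following, where 'dev' and my_set_rate() are
 * hypothetical placeholders:
 *
 *	struct dev_pm_opp *opp;
 *	unsigned long freq = 0;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_get_suspend_opp(dev);
 *	if (opp)
 *		freq = dev_pm_opp_get_freq(opp);
 *	rcu_read_unlock();
 *
 *	if (freq)
 *		my_set_rate(dev, freq);
 */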
e1f60b29 261/**
5d4879cd 262 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
263 * @dev: device for which we do this operation
264 *
984f16c8 265 * Return: This function returns the number of available opps if there are any,
266 * else returns 0 if none or the corresponding error value.
267 *
b4718c02 268 * Locking: This function takes rcu_read_lock().
e1f60b29 269 */
5d4879cd 270int dev_pm_opp_get_opp_count(struct device *dev)
271{
272 struct device_opp *dev_opp;
47d43ba7 273 struct dev_pm_opp *temp_opp;
274 int count = 0;
275
b4718c02 276 rcu_read_lock();
b02ded24 277
327854c8 278 dev_opp = _find_device_opp(dev);
e1f60b29 279 if (IS_ERR(dev_opp)) {
280 count = PTR_ERR(dev_opp);
281 dev_err(dev, "%s: device OPP not found (%d)\n",
282 __func__, count);
283 goto out_unlock;
284 }
285
286 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
287 if (temp_opp->available)
288 count++;
289 }
290
291out_unlock:
292 rcu_read_unlock();
293 return count;
294}
5d4879cd 295EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
296
297/**
5d4879cd 298 * dev_pm_opp_find_freq_exact() - search for an exact frequency
299 * @dev: device for which we do this operation
300 * @freq: frequency to search for
7ae49618 301 * @available: true/false - match for available opp
e1f60b29 302 *
303 * Return: Searches for exact match in the opp list and returns pointer to the
304 * matching opp if found, else returns ERR_PTR in case of error and should
305 * be handled using IS_ERR. Error return values can be:
306 * EINVAL: for bad pointer
307 * ERANGE: no match found for search
308 * ENODEV: if device not found in list of registered devices
309 *
310 * Note: available is a modifier for the search. if available=true, then the
311 * match is for exact matching frequency and is available in the stored OPP
312 * table. if false, the match is for exact frequency which is not available.
313 *
314 * This provides a mechanism to enable an opp which is not available currently
315 * or the opposite as well.
316 *
317 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
318 * protected pointer. The reason for the same is that the opp pointer which is
319 * returned will remain valid for use with opp_get_{voltage, freq} only while
320 * under the locked area. The pointer returned must be used prior to unlocking
321 * with rcu_read_unlock() to maintain the integrity of the pointer.
322 */
323struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
324 unsigned long freq,
325 bool available)
326{
327 struct device_opp *dev_opp;
47d43ba7 328 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
e1f60b29 329
330 opp_rcu_lockdep_assert();
331
327854c8 332 dev_opp = _find_device_opp(dev);
333 if (IS_ERR(dev_opp)) {
334 int r = PTR_ERR(dev_opp);
335 dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
336 return ERR_PTR(r);
337 }
338
339 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
340 if (temp_opp->available == available &&
341 temp_opp->rate == freq) {
342 opp = temp_opp;
343 break;
344 }
345 }
346
347 return opp;
348}
5d4879cd 349EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
350
351/**
5d4879cd 352 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
353 * @dev: device for which we do this operation
354 * @freq: Start frequency
355 *
356 * Search for the matching ceil *available* OPP from a starting freq
357 * for a device.
358 *
984f16c8 359 * Return: matching *opp and refreshes *freq accordingly, else returns
360 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
361 * values can be:
362 * EINVAL: for bad pointer
363 * ERANGE: no match found for search
364 * ENODEV: if device not found in list of registered devices
365 *
366 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
367 * protected pointer. The reason for the same is that the opp pointer which is
368 * returned will remain valid for use with opp_get_{voltage, freq} only while
369 * under the locked area. The pointer returned must be used prior to unlocking
370 * with rcu_read_unlock() to maintain the integrity of the pointer.
371 */
372struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
373 unsigned long *freq)
374{
375 struct device_opp *dev_opp;
47d43ba7 376 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
e1f60b29 377
378 opp_rcu_lockdep_assert();
379
380 if (!dev || !freq) {
381 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
382 return ERR_PTR(-EINVAL);
383 }
384
327854c8 385 dev_opp = _find_device_opp(dev);
e1f60b29 386 if (IS_ERR(dev_opp))
0779726c 387 return ERR_CAST(dev_opp);
388
389 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
390 if (temp_opp->available && temp_opp->rate >= *freq) {
391 opp = temp_opp;
392 *freq = opp->rate;
393 break;
394 }
395 }
396
397 return opp;
398}
5d4879cd 399EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
400
401/**
5d4879cd 402 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
403 * @dev: device for which we do this operation
404 * @freq: Start frequency
405 *
406 * Search for the matching floor *available* OPP from a starting freq
407 * for a device.
408 *
984f16c8 409 * Return: matching *opp and refreshes *freq accordingly, else returns
410 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
411 * values can be:
412 * EINVAL: for bad pointer
413 * ERANGE: no match found for search
414 * ENODEV: if device not found in list of registered devices
415 *
416 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
417 * protected pointer. The reason for the same is that the opp pointer which is
418 * returned will remain valid for use with opp_get_{voltage, freq} only while
419 * under the locked area. The pointer returned must be used prior to unlocking
420 * with rcu_read_unlock() to maintain the integrity of the pointer.
421 */
422struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
423 unsigned long *freq)
424{
425 struct device_opp *dev_opp;
47d43ba7 426 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
e1f60b29 427
428 opp_rcu_lockdep_assert();
429
430 if (!dev || !freq) {
431 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
432 return ERR_PTR(-EINVAL);
433 }
434
327854c8 435 dev_opp = _find_device_opp(dev);
e1f60b29 436 if (IS_ERR(dev_opp))
0779726c 437 return ERR_CAST(dev_opp);
438
439 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
440 if (temp_opp->available) {
441 /* go to the next node, before choosing prev */
442 if (temp_opp->rate > *freq)
443 break;
444 else
445 opp = temp_opp;
446 }
447 }
448 if (!IS_ERR(opp))
449 *freq = opp->rate;
450
451 return opp;
452}
5d4879cd 453EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
e1f60b29 454
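/*
 * Illustrative sketch (not part of this file): the ceil helper is also the
 * usual way to walk every available OPP in ascending frequency order, e.g.
 * when building a cpufreq-style table. 'dev' is an assumed valid device
 * pointer; bumping freq by one moves past the OPP just returned:
 *
 *	struct dev_pm_opp *opp;
 *	unsigned long freq = 0;
 *
 *	rcu_read_lock();
 *	while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
 *		pr_info("OPP: %lu Hz, %lu uV\n",
 *			freq, dev_pm_opp_get_voltage(opp));
 *		freq++;
 *	}
 *	rcu_read_unlock();
 */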
455/* List-dev Helpers */
456static void _kfree_list_dev_rcu(struct rcu_head *head)
457{
458 struct device_list_opp *list_dev;
459
460 list_dev = container_of(head, struct device_list_opp, rcu_head);
461 kfree_rcu(list_dev, rcu_head);
462}
463
464static void _remove_list_dev(struct device_list_opp *list_dev,
465 struct device_opp *dev_opp)
466{
deaa5146 467 opp_debug_unregister(list_dev, dev_opp);
468 list_del(&list_dev->node);
469 call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
470 _kfree_list_dev_rcu);
471}
472
473struct device_list_opp *_add_list_dev(const struct device *dev,
474 struct device_opp *dev_opp)
475{
476 struct device_list_opp *list_dev;
deaa5146 477 int ret;
478
479 list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
480 if (!list_dev)
481 return NULL;
482
483 /* Initialize list-dev */
484 list_dev->dev = dev;
485 list_add_rcu(&list_dev->node, &dev_opp->dev_list);
486
487 /* Create debugfs entries for the dev_opp */
488 ret = opp_debug_register(list_dev, dev_opp);
489 if (ret)
490 dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
491 __func__, ret);
492
493 return list_dev;
494}
495
984f16c8 496/**
aa5f2f85 497 * _add_device_opp() - Find device OPP table or allocate a new one
498 * @dev: device for which we do this operation
499 *
500 * It tries to find an existing table first, if it couldn't find one, it
501 * allocates a new OPP table and returns that.
502 *
503 * Return: valid device_opp pointer if success, else NULL.
504 */
327854c8 505static struct device_opp *_add_device_opp(struct device *dev)
506{
507 struct device_opp *dev_opp;
06441658 508 struct device_list_opp *list_dev;
07cce74a 509
510 /* Check for existing list for 'dev' first */
511 dev_opp = _find_device_opp(dev);
512 if (!IS_ERR(dev_opp))
513 return dev_opp;
514
515 /*
516 * Allocate a new device OPP table. In the infrequent case where a new
517 * device needs to be added, we pay this penalty.
518 */
519 dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
520 if (!dev_opp)
521 return NULL;
522
523 INIT_LIST_HEAD(&dev_opp->dev_list);
524
525 list_dev = _add_list_dev(dev, dev_opp);
526 if (!list_dev) {
527 kfree(dev_opp);
528 return NULL;
529 }
530
531 srcu_init_notifier_head(&dev_opp->srcu_head);
532 INIT_LIST_HEAD(&dev_opp->opp_list);
533
534 /* Secure the device list modification */
535 list_add_rcu(&dev_opp->node, &dev_opp_list);
536 return dev_opp;
537}
538
984f16c8 539/**
540 * _kfree_device_rcu() - Free device_opp RCU handler
541 * @head: RCU head
984f16c8 542 */
737002b5 543static void _kfree_device_rcu(struct rcu_head *head)
e1f60b29 544{
737002b5 545 struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
6ce4184d 546
737002b5 547 kfree_rcu(device_opp, rcu_head);
e1f60b29 548}
549
550/**
3bac42ca
VK
551 * _remove_device_opp() - Removes a device OPP table
552 * @dev_opp: device OPP table to be removed.
38393409 553 *
3bac42ca 554 * Removes/frees device OPP table it it doesn't contain any OPPs.
38393409 555 */
3bac42ca 556static void _remove_device_opp(struct device_opp *dev_opp)
38393409 557{
558 struct device_list_opp *list_dev;
559
560 if (!list_empty(&dev_opp->opp_list))
561 return;
562
563 if (dev_opp->supported_hw)
564 return;
565
566 if (dev_opp->prop_name)
567 return;
568
569 if (!IS_ERR_OR_NULL(dev_opp->regulator))
570 return;
571
572 list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
573 node);
574
575 _remove_list_dev(list_dev, dev_opp);
576
577 /* dev_list must be empty now */
578 WARN_ON(!list_empty(&dev_opp->dev_list));
579
580 list_del_rcu(&dev_opp->node);
581 call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
582 _kfree_device_rcu);
38393409 583}
e1f60b29 584
585/**
586 * _kfree_opp_rcu() - Free OPP RCU handler
587 * @head: RCU head
588 */
327854c8 589static void _kfree_opp_rcu(struct rcu_head *head)
590{
591 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
592
593 kfree_rcu(opp, rcu_head);
594}
595
596/**
597 * _opp_remove() - Remove an OPP from a table definition
598 * @dev_opp: points back to the device_opp struct this opp belongs to
599 * @opp: pointer to the OPP to remove
23dacf6d 600 * @notify: OPP_EVENT_REMOVE notification should be sent or not
601 *
602 * This function removes an opp definition from the opp list.
603 *
604 * Locking: The internal device_opp and opp structures are RCU protected.
605 * It is assumed that the caller holds required mutex for an RCU updater
606 * strategy.
607 */
327854c8 608static void _opp_remove(struct device_opp *dev_opp,
23dacf6d 609 struct dev_pm_opp *opp, bool notify)
610{
611 /*
612 * Notify the changes in the availability of the operable
613 * frequency/voltage list.
614 */
615 if (notify)
616 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
deaa5146 617 opp_debug_remove_one(opp);
129eec55 618 list_del_rcu(&opp->node);
327854c8 619 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
129eec55 620
3bac42ca 621 _remove_device_opp(dev_opp);
622}
623
624/**
625 * dev_pm_opp_remove() - Remove an OPP from OPP list
626 * @dev: device for which we do this operation
627 * @freq: OPP to remove with matching 'freq'
628 *
629 * This function removes an opp from the opp list.
630 *
631 * Locking: The internal device_opp and opp structures are RCU protected.
632 * Hence this function internally uses RCU updater strategy with mutex locks
633 * to keep the integrity of the internal data structures. Callers should ensure
634 * that this function is *NOT* called under RCU protection or in contexts where
635 * mutex cannot be locked.
636 */
637void dev_pm_opp_remove(struct device *dev, unsigned long freq)
638{
639 struct dev_pm_opp *opp;
640 struct device_opp *dev_opp;
641 bool found = false;
642
643 /* Hold our list modification lock here */
644 mutex_lock(&dev_opp_list_lock);
645
327854c8 646 dev_opp = _find_device_opp(dev);
647 if (IS_ERR(dev_opp))
648 goto unlock;
649
650 list_for_each_entry(opp, &dev_opp->opp_list, node) {
651 if (opp->rate == freq) {
652 found = true;
653 break;
654 }
655 }
656
657 if (!found) {
658 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
659 __func__, freq);
660 goto unlock;
661 }
662
23dacf6d 663 _opp_remove(dev_opp, opp, true);
664unlock:
665 mutex_unlock(&dev_opp_list_lock);
666}
667EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
668
669static struct dev_pm_opp *_allocate_opp(struct device *dev,
670 struct device_opp **dev_opp)
e1f60b29 671{
23dacf6d 672 struct dev_pm_opp *opp;
e1f60b29 673
674 /* allocate new OPP node */
675 opp = kzalloc(sizeof(*opp), GFP_KERNEL);
676 if (!opp)
677 return NULL;
e1f60b29 678
23dacf6d 679 INIT_LIST_HEAD(&opp->node);
e1f60b29 680
681 *dev_opp = _add_device_opp(dev);
682 if (!*dev_opp) {
683 kfree(opp);
684 return NULL;
685 }
686
687 return opp;
688}
689
690static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
691 struct device_opp *dev_opp)
692{
693 struct regulator *reg = dev_opp->regulator;
694
695 if (!IS_ERR(reg) &&
696 !regulator_is_supported_voltage(reg, opp->u_volt_min,
697 opp->u_volt_max)) {
698 pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
699 __func__, opp->u_volt_min, opp->u_volt_max);
700 return false;
701 }
702
703 return true;
704}
705
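/*
 * Note on the helper above: dev_opp->regulator is only a valid regulator
 * when the platform called dev_pm_opp_set_regulator() before the OPPs were
 * added; otherwise it is an ERR_PTR and the voltage check is skipped. An
 * illustrative (hypothetical) platform-side ordering, with "vdd" as an
 * assumed supply name:
 *
 *	ret = dev_pm_opp_set_regulator(dev, "vdd");
 *	if (ret)
 *		return ret;
 *
 *	ret = dev_pm_opp_of_add_table(dev);
 *	if (ret)
 *		dev_pm_opp_put_regulator(dev);
 *
 * With this ordering each static OPP parsed from DT is checked against the
 * "vdd" regulator here and marked unavailable if its voltage range cannot
 * be supported.
 */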
706static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
707 struct device_opp *dev_opp)
708{
709 struct dev_pm_opp *opp;
710 struct list_head *head = &dev_opp->opp_list;
deaa5146 711 int ret;
712
713 /*
714 * Insert new OPP in order of increasing frequency and discard if
715 * already present.
716 *
717 * Need to use &dev_opp->opp_list in the condition part of the 'for'
718 * loop, don't replace it with head otherwise it will become an infinite
719 * loop.
720 */
721 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
722 if (new_opp->rate > opp->rate) {
723 head = &opp->node;
724 continue;
725 }
726
727 if (new_opp->rate < opp->rate)
728 break;
729
730 /* Duplicate OPPs */
06441658 731 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
732 __func__, opp->rate, opp->u_volt, opp->available,
733 new_opp->rate, new_opp->u_volt, new_opp->available);
734
735 return opp->available && new_opp->u_volt == opp->u_volt ?
736 0 : -EEXIST;
737 }
738
739 new_opp->dev_opp = dev_opp;
740 list_add_rcu(&new_opp->node, head);
741
742 ret = opp_debug_create_one(new_opp, dev_opp);
743 if (ret)
744 dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
745 __func__, ret);
746
747 if (!_opp_supported_by_regulators(new_opp, dev_opp)) {
748 new_opp->available = false;
749 dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
750 __func__, new_opp->rate);
751 }
752
753 return 0;
754}
755
984f16c8 756/**
b64b9c3f 757 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
758 * @dev: device for which we do this operation
759 * @freq: Frequency in Hz for this OPP
760 * @u_volt: Voltage in uVolts for this OPP
761 * @dynamic: Dynamically added OPPs.
762 *
763 * This function adds an opp definition to the opp list and returns status.
764 * The opp is made available by default and it can be controlled using
765 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
766 *
767 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
768 * and freed by dev_pm_opp_of_remove_table.
769 *
770 * Locking: The internal device_opp and opp structures are RCU protected.
771 * Hence this function internally uses RCU updater strategy with mutex locks
772 * to keep the integrity of the internal data structures. Callers should ensure
773 * that this function is *NOT* called under RCU protection or in contexts where
774 * mutex cannot be locked.
775 *
776 * Return:
777 * 0 On success OR
778 * Duplicate OPPs (both freq and volt are same) and opp->available
779 * -EEXIST Freq are same and volt are different OR
780 * Duplicate OPPs (both freq and volt are same) and !opp->available
781 * -ENOMEM Memory allocation failure
782 */
783static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
784 bool dynamic)
e1f60b29 785{
aa5f2f85 786 struct device_opp *dev_opp;
23dacf6d 787 struct dev_pm_opp *new_opp;
6ce4184d 788 int ret;
e1f60b29 789
790 /* Hold our list modification lock here */
791 mutex_lock(&dev_opp_list_lock);
792
793 new_opp = _allocate_opp(dev, &dev_opp);
794 if (!new_opp) {
795 ret = -ENOMEM;
796 goto unlock;
797 }
798
a7470db6 799 /* populate the opp table */
800 new_opp->rate = freq;
801 new_opp->u_volt = u_volt;
802 new_opp->available = true;
23dacf6d 803 new_opp->dynamic = dynamic;
a7470db6 804
06441658 805 ret = _opp_add(dev, new_opp, dev_opp);
23dacf6d 806 if (ret)
6ce4184d 807 goto free_opp;
64ce8545 808
809 mutex_unlock(&dev_opp_list_lock);
810
811 /*
812 * Notify the changes in the availability of the operable
813 * frequency/voltage list.
814 */
cd1a068a 815 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
e1f60b29 816 return 0;
817
818free_opp:
819 _opp_remove(dev_opp, new_opp, false);
820unlock:
6ce4184d 821 mutex_unlock(&dev_opp_list_lock);
6ce4184d 822 return ret;
e1f60b29 823}
38393409 824
27465902 825/* TODO: Support multiple regulators */
826static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
827 struct device_opp *dev_opp)
828{
829 u32 microvolt[3] = {0};
ad623c31 830 u32 val;
27465902 831 int count, ret;
832 struct property *prop = NULL;
833 char name[NAME_MAX];
834
835 /* Search for "opp-microvolt-<name>" */
836 if (dev_opp->prop_name) {
837 snprintf(name, sizeof(name), "opp-microvolt-%s",
838 dev_opp->prop_name);
839 prop = of_find_property(opp->np, name, NULL);
840 }
841
842 if (!prop) {
843 /* Search for "opp-microvolt" */
fd8d8e63 844 sprintf(name, "opp-microvolt");
01fb4d3c 845 prop = of_find_property(opp->np, name, NULL);
27465902 846
847 /* Missing property isn't a problem, but an invalid entry is */
848 if (!prop)
849 return 0;
850 }
27465902 851
01fb4d3c 852 count = of_property_count_u32_elems(opp->np, name);
680168a5 853 if (count < 0) {
854 dev_err(dev, "%s: Invalid %s property (%d)\n",
855 __func__, name, count);
856 return count;
857 }
858
859 /* There can be one or three elements here */
860 if (count != 1 && count != 3) {
861 dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
862 __func__, name, count);
863 return -EINVAL;
864 }
865
01fb4d3c 866 ret = of_property_read_u32_array(opp->np, name, microvolt, count);
27465902 867 if (ret) {
01fb4d3c 868 dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
869 return -EINVAL;
870 }
871
872 opp->u_volt = microvolt[0];
873 opp->u_volt_min = microvolt[1];
874 opp->u_volt_max = microvolt[2];
875
876 /* Search for "opp-microamp-<name>" */
877 prop = NULL;
878 if (dev_opp->prop_name) {
879 snprintf(name, sizeof(name), "opp-microamp-%s",
880 dev_opp->prop_name);
881 prop = of_find_property(opp->np, name, NULL);
882 }
883
884 if (!prop) {
885 /* Search for "opp-microamp" */
fd8d8e63 886 sprintf(name, "opp-microamp");
887 prop = of_find_property(opp->np, name, NULL);
888 }
889
890 if (prop && !of_property_read_u32(opp->np, name, &val))
891 opp->u_amp = val;
892
893 return 0;
894}
895
896/**
897 * dev_pm_opp_set_supported_hw() - Set supported platforms
898 * @dev: Device for which supported-hw has to be set.
899 * @versions: Array of hierarchy of versions to match.
900 * @count: Number of elements in the array.
901 *
902 * This is required only for the V2 bindings, and it enables a platform to
903 * specify the hierarchy of versions it supports. OPP layer will then enable
904 * OPPs that are available for those versions, based on the 'opp-supported-hw'
905 * property.
906 *
907 * Locking: The internal device_opp and opp structures are RCU protected.
908 * Hence this function internally uses RCU updater strategy with mutex locks
909 * to keep the integrity of the internal data structures. Callers should ensure
910 * that this function is *NOT* called under RCU protection or in contexts where
911 * mutex cannot be locked.
912 */
913int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
914 unsigned int count)
915{
916 struct device_opp *dev_opp;
917 int ret = 0;
918
919 /* Hold our list modification lock here */
920 mutex_lock(&dev_opp_list_lock);
921
922 dev_opp = _add_device_opp(dev);
923 if (!dev_opp) {
924 ret = -ENOMEM;
925 goto unlock;
926 }
927
928 /* Make sure there are no concurrent readers while updating dev_opp */
929 WARN_ON(!list_empty(&dev_opp->opp_list));
930
931 /* Do we already have a version hierarchy associated with dev_opp? */
932 if (dev_opp->supported_hw) {
933 dev_err(dev, "%s: Already have supported hardware list\n",
934 __func__);
935 ret = -EBUSY;
936 goto err;
937 }
938
939 dev_opp->supported_hw = kmemdup(versions, count * sizeof(*versions),
940 GFP_KERNEL);
941 if (!dev_opp->supported_hw) {
942 ret = -ENOMEM;
943 goto err;
944 }
945
946 dev_opp->supported_hw_count = count;
947 mutex_unlock(&dev_opp_list_lock);
948 return 0;
949
950err:
951 _remove_device_opp(dev_opp);
952unlock:
953 mutex_unlock(&dev_opp_list_lock);
954
955 return ret;
956}
957EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
958
959/**
960 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
961 * @dev: Device for which supported-hw was set.
962 *
963 * This is required only for the V2 bindings, and is called for a matching
964 * dev_pm_opp_set_supported_hw(). Until this is called, the device_opp structure
965 * will not be freed.
966 *
967 * Locking: The internal device_opp and opp structures are RCU protected.
968 * Hence this function internally uses RCU updater strategy with mutex locks
969 * to keep the integrity of the internal data structures. Callers should ensure
970 * that this function is *NOT* called under RCU protection or in contexts where
971 * mutex cannot be locked.
972 */
973void dev_pm_opp_put_supported_hw(struct device *dev)
974{
975 struct device_opp *dev_opp;
976
977 /* Hold our list modification lock here */
978 mutex_lock(&dev_opp_list_lock);
979
980 /* Check for existing list for 'dev' first */
981 dev_opp = _find_device_opp(dev);
982 if (IS_ERR(dev_opp)) {
983 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
984 goto unlock;
985 }
986
987 /* Make sure there are no concurrent readers while updating dev_opp */
988 WARN_ON(!list_empty(&dev_opp->opp_list));
989
990 if (!dev_opp->supported_hw) {
991 dev_err(dev, "%s: Doesn't have supported hardware list\n",
992 __func__);
993 goto unlock;
994 }
995
996 kfree(dev_opp->supported_hw);
997 dev_opp->supported_hw = NULL;
998 dev_opp->supported_hw_count = 0;
999
1000 /* Try freeing device_opp if this was the last blocking resource */
1001 _remove_device_opp(dev_opp);
1002
1003unlock:
1004 mutex_unlock(&dev_opp_list_lock);
1005}
1006EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
1007
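/*
 * Illustrative sketch (not part of this file): a platform that reads a
 * version hierarchy from hardware (cuts, speed bins, ...) passes it in
 * before creating the table, so that OPP nodes whose "opp-supported-hw"
 * masks do not match are left out. The values below are made up:
 *
 *	u32 versions[] = { BIT(1), BIT(2) };
 *	int ret;
 *
 *	ret = dev_pm_opp_set_supported_hw(dev, versions,
 *					  ARRAY_SIZE(versions));
 *	if (ret)
 *		return ret;
 *
 *	ret = dev_pm_opp_of_add_table(dev);
 *
 * On teardown, after dev_pm_opp_of_remove_table(dev), a matching
 * dev_pm_opp_put_supported_hw(dev) releases the copy made above.
 */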
1008/**
1009 * dev_pm_opp_set_prop_name() - Set prop-extn name
1010 * @dev: Device for which the prop-name has to be set.
1011 * @name: name to postfix to properties.
1012 *
1013 * This is required only for the V2 bindings, and it enables a platform to
1014 * specify the extn to be used for certain property names. The properties to
1015 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
1016 * should postfix the property name with -<name> while looking for them.
1017 *
1018 * Locking: The internal device_opp and opp structures are RCU protected.
1019 * Hence this function internally uses RCU updater strategy with mutex locks
1020 * to keep the integrity of the internal data structures. Callers should ensure
1021 * that this function is *NOT* called under RCU protection or in contexts where
1022 * mutex cannot be locked.
1023 */
1024int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1025{
1026 struct device_opp *dev_opp;
1027 int ret = 0;
1028
1029 /* Hold our list modification lock here */
1030 mutex_lock(&dev_opp_list_lock);
1031
1032 dev_opp = _add_device_opp(dev);
1033 if (!dev_opp) {
1034 ret = -ENOMEM;
1035 goto unlock;
1036 }
1037
1038 /* Make sure there are no concurrent readers while updating dev_opp */
1039 WARN_ON(!list_empty(&dev_opp->opp_list));
1040
1041 /* Do we already have a prop-name associated with dev_opp? */
1042 if (dev_opp->prop_name) {
1043 dev_err(dev, "%s: Already have prop-name %s\n", __func__,
1044 dev_opp->prop_name);
1045 ret = -EBUSY;
1046 goto err;
1047 }
1048
1049 dev_opp->prop_name = kstrdup(name, GFP_KERNEL);
1050 if (!dev_opp->prop_name) {
1051 ret = -ENOMEM;
1052 goto err;
1053 }
1054
1055 mutex_unlock(&dev_opp_list_lock);
1056 return 0;
1057
1058err:
1059 _remove_device_opp(dev_opp);
1060unlock:
1061 mutex_unlock(&dev_opp_list_lock);
1062
1063 return ret;
1064}
1065EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
1066
1067/**
1068 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
1069 * @dev: Device for which the prop-name was set.
1070 *
1071 * This is required only for the V2 bindings, and is called for a matching
1072 * dev_pm_opp_set_prop_name(). Until this is called, the device_opp structure
1073 * will not be freed.
1074 *
1075 * Locking: The internal device_opp and opp structures are RCU protected.
1076 * Hence this function internally uses RCU updater strategy with mutex locks
1077 * to keep the integrity of the internal data structures. Callers should ensure
1078 * that this function is *NOT* called under RCU protection or in contexts where
1079 * mutex cannot be locked.
1080 */
1081void dev_pm_opp_put_prop_name(struct device *dev)
1082{
1083 struct device_opp *dev_opp;
1084
1085 /* Hold our list modification lock here */
1086 mutex_lock(&dev_opp_list_lock);
1087
1088 /* Check for existing list for 'dev' first */
1089 dev_opp = _find_device_opp(dev);
1090 if (IS_ERR(dev_opp)) {
1091 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
1092 goto unlock;
1093 }
1094
1095 /* Make sure there are no concurrent readers while updating dev_opp */
1096 WARN_ON(!list_empty(&dev_opp->opp_list));
1097
1098 if (!dev_opp->prop_name) {
1099 dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
1100 goto unlock;
1101 }
1102
1103 kfree(dev_opp->prop_name);
1104 dev_opp->prop_name = NULL;
1105
1106 /* Try freeing device_opp if this was the last blocking resource */
1107 _remove_device_opp(dev_opp);
1108
1109unlock:
1110 mutex_unlock(&dev_opp_list_lock);
1111}
1112EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1113
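/*
 * Illustrative sketch (not part of this file): once a prop-name is set,
 * opp_parse_supplies() above prefers "opp-microvolt-<name>" and
 * "opp-microamp-<name>" and only falls back to the plain properties when
 * the suffixed ones are absent. With an assumed speed bin called "speed1",
 * a platform would do:
 *
 *	ret = dev_pm_opp_set_prop_name(dev, "speed1");
 *	if (ret)
 *		return ret;
 *
 *	ret = dev_pm_opp_of_add_table(dev);
 *
 * so every OPP node is parsed with "opp-microvolt-speed1" first, and
 * dev_pm_opp_put_prop_name(dev) is called after the table is removed.
 */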
1114/**
1115 * dev_pm_opp_set_regulator() - Set regulator name for the device
1116 * @dev: Device for which regulator name is being set.
1117 * @name: Name of the regulator.
1118 *
1119 * In order to support OPP switching, OPP layer needs to know the name of the
1120 * device's regulator, as the core would be required to switch voltages as well.
1121 *
1122 * This must be called before any OPPs are initialized for the device.
1123 *
1124 * Locking: The internal device_opp and opp structures are RCU protected.
1125 * Hence this function internally uses RCU updater strategy with mutex locks
1126 * to keep the integrity of the internal data structures. Callers should ensure
1127 * that this function is *NOT* called under RCU protection or in contexts where
1128 * mutex cannot be locked.
1129 */
1130int dev_pm_opp_set_regulator(struct device *dev, const char *name)
1131{
1132 struct device_opp *dev_opp;
1133 struct regulator *reg;
1134 int ret;
1135
1136 mutex_lock(&dev_opp_list_lock);
1137
1138 dev_opp = _add_device_opp(dev);
1139 if (!dev_opp) {
1140 ret = -ENOMEM;
1141 goto unlock;
1142 }
1143
1144 /* This should be called before OPPs are initialized */
1145 if (WARN_ON(!list_empty(&dev_opp->opp_list))) {
1146 ret = -EBUSY;
1147 goto err;
1148 }
1149
1150 /* Already have a regulator set */
1151 if (WARN_ON(!IS_ERR_OR_NULL(dev_opp->regulator))) {
1152 ret = -EBUSY;
1153 goto err;
1154 }
1155 /* Allocate the regulator */
1156 reg = regulator_get_optional(dev, name);
1157 if (IS_ERR(reg)) {
1158 ret = PTR_ERR(reg);
1159 if (ret != -EPROBE_DEFER)
1160 dev_err(dev, "%s: no regulator (%s) found: %d\n",
1161 __func__, name, ret);
1162 goto err;
1163 }
1164
1165 dev_opp->regulator = reg;
1166
1167 mutex_unlock(&dev_opp_list_lock);
1168 return 0;
1169
1170err:
1171 _remove_device_opp(dev_opp);
1172unlock:
1173 mutex_unlock(&dev_opp_list_lock);
1174
1175 return ret;
1176}
1177EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
1178
1179/**
1180 * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
1181 * @dev: Device for which regulator was set.
1182 *
1183 * Locking: The internal device_opp and opp structures are RCU protected.
1184 * Hence this function internally uses RCU updater strategy with mutex locks
1185 * to keep the integrity of the internal data structures. Callers should ensure
1186 * that this function is *NOT* called under RCU protection or in contexts where
1187 * mutex cannot be locked.
1188 */
1189void dev_pm_opp_put_regulator(struct device *dev)
1190{
1191 struct device_opp *dev_opp;
1192
1193 mutex_lock(&dev_opp_list_lock);
1194
1195 /* Check for existing list for 'dev' first */
1196 dev_opp = _find_device_opp(dev);
1197 if (IS_ERR(dev_opp)) {
1198 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
1199 goto unlock;
1200 }
1201
1202 if (IS_ERR_OR_NULL(dev_opp->regulator)) {
1203 dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
1204 goto unlock;
1205 }
1206
1207 /* Make sure there are no concurrent readers while updating dev_opp */
1208 WARN_ON(!list_empty(&dev_opp->opp_list));
1209
1210 regulator_put(dev_opp->regulator);
1211 dev_opp->regulator = ERR_PTR(-EINVAL);
1212
1213 /* Try freeing device_opp if this was the last blocking resource */
1214 _remove_device_opp(dev_opp);
1215
1216unlock:
1217 mutex_unlock(&dev_opp_list_lock);
1218}
1219EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
1220
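/*
 * Illustrative sketch (not part of this file): the regulator must outlive
 * the OPPs that reference it, so teardown mirrors setup. A hypothetical
 * driver remove path would be:
 *
 *	dev_pm_opp_of_remove_table(dev);
 *	dev_pm_opp_put_regulator(dev);
 *
 * Note that dev_pm_opp_set_regulator() can return -EPROBE_DEFER when the
 * named supply is not registered yet; callers are expected to propagate
 * that instead of treating it as a hard failure.
 */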
1221static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
1222 struct device_node *np)
1223{
1224 unsigned int count = dev_opp->supported_hw_count;
1225 u32 version;
1226 int ret;
1227
1228 if (!dev_opp->supported_hw)
1229 return true;
1230
1231 while (count--) {
1232 ret = of_property_read_u32_index(np, "opp-supported-hw", count,
1233 &version);
1234 if (ret) {
1235 dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
1236 __func__, count, ret);
1237 return false;
1238 }
1239
1240 /* Both of these are bitwise masks of the versions */
1241 if (!(version & dev_opp->supported_hw[count]))
1242 return false;
1243 }
1244
1245 return true;
1246}
1247
1248/**
1249 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
1250 * @dev: device for which we do this operation
1251 * @np: device node
1252 *
1253 * This function adds an opp definition to the opp list and returns status. The
1254 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
1255 * removed by dev_pm_opp_remove.
1256 *
1257 * Locking: The internal device_opp and opp structures are RCU protected.
1258 * Hence this function internally uses RCU updater strategy with mutex locks
1259 * to keep the integrity of the internal data structures. Callers should ensure
1260 * that this function is *NOT* called under RCU protection or in contexts where
1261 * mutex cannot be locked.
1262 *
1263 * Return:
1264 * 0 On success OR
1265 * Duplicate OPPs (both freq and volt are same) and opp->available
1266 * -EEXIST Freq are same and volt are different OR
1267 * Duplicate OPPs (both freq and volt are same) and !opp->available
1268 * -ENOMEM Memory allocation failure
1269 * -EINVAL Failed parsing the OPP node
1270 */
1271static int _opp_add_static_v2(struct device *dev, struct device_node *np)
1272{
1273 struct device_opp *dev_opp;
1274 struct dev_pm_opp *new_opp;
1275 u64 rate;
68fa9f0a 1276 u32 val;
1277 int ret;
1278
1279 /* Hold our list modification lock here */
1280 mutex_lock(&dev_opp_list_lock);
1281
1282 new_opp = _allocate_opp(dev, &dev_opp);
1283 if (!new_opp) {
1284 ret = -ENOMEM;
1285 goto unlock;
1286 }
1287
1288 ret = of_property_read_u64(np, "opp-hz", &rate);
1289 if (ret < 0) {
1290 dev_err(dev, "%s: opp-hz not found\n", __func__);
1291 goto free_opp;
1292 }
1293
1294 /* Check if the OPP supports hardware's hierarchy of versions or not */
1295 if (!_opp_is_supported(dev, dev_opp, np)) {
1296 dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
1297 goto free_opp;
1298 }
1299
1300 /*
1301 * Rate is defined as an unsigned long in clk API, and so casting
1302 * explicitly to its type. Must be fixed once rate is 64 bit
1303 * guaranteed in clk API.
1304 */
1305 new_opp->rate = (unsigned long)rate;
1306 new_opp->turbo = of_property_read_bool(np, "turbo-mode");
1307
1308 new_opp->np = np;
1309 new_opp->dynamic = false;
1310 new_opp->available = true;
1311
1312 if (!of_property_read_u32(np, "clock-latency-ns", &val))
1313 new_opp->clock_latency_ns = val;
27465902 1314
01fb4d3c 1315 ret = opp_parse_supplies(new_opp, dev, dev_opp);
1316 if (ret)
1317 goto free_opp;
1318
06441658 1319 ret = _opp_add(dev, new_opp, dev_opp);
1320 if (ret)
1321 goto free_opp;
1322
1323 /* OPP to select on device suspend */
1324 if (of_property_read_bool(np, "opp-suspend")) {
deaa5146 1325 if (dev_opp->suspend_opp) {
1326 dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
1327 __func__, dev_opp->suspend_opp->rate,
1328 new_opp->rate);
1329 } else {
1330 new_opp->suspend = true;
ad656a6a 1331 dev_opp->suspend_opp = new_opp;
deaa5146 1332 }
1333 }
1334
1335 if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
1336 dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
1337
1338 mutex_unlock(&dev_opp_list_lock);
1339
3ca9bb33 1340 pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
27465902 1341 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
1342 new_opp->u_volt_min, new_opp->u_volt_max,
1343 new_opp->clock_latency_ns);
1344
1345 /*
1346 * Notify the changes in the availability of the operable
1347 * frequency/voltage list.
1348 */
1349 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
1350 return 0;
1351
1352free_opp:
1353 _opp_remove(dev_opp, new_opp, false);
1354unlock:
1355 mutex_unlock(&dev_opp_list_lock);
1356 return ret;
1357}
1358
1359/**
1360 * dev_pm_opp_add() - Add an OPP table from a table definitions
1361 * @dev: device for which we do this operation
1362 * @freq: Frequency in Hz for this OPP
1363 * @u_volt: Voltage in uVolts for this OPP
1364 *
1365 * This function adds an opp definition to the opp list and returns status.
1366 * The opp is made available by default and it can be controlled using
1367 * dev_pm_opp_enable/disable functions.
1368 *
1369 * Locking: The internal device_opp and opp structures are RCU protected.
1370 * Hence this function internally uses RCU updater strategy with mutex locks
1371 * to keep the integrity of the internal data structures. Callers should ensure
1372 * that this function is *NOT* called under RCU protection or in contexts where
1373 * mutex cannot be locked.
1374 *
1375 * Return:
984f16c8 1376 * 0 On success OR
38393409 1377 * Duplicate OPPs (both freq and volt are same) and opp->available
984f16c8 1378 * -EEXIST Freq are same and volt are different OR
38393409 1379 * Duplicate OPPs (both freq and volt are same) and !opp->available
984f16c8 1380 * -ENOMEM Memory allocation failure
1381 */
1382int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
1383{
b64b9c3f 1384 return _opp_add_v1(dev, freq, u_volt, true);
38393409 1385}
5d4879cd 1386EXPORT_SYMBOL_GPL(dev_pm_opp_add);
1387
1388/**
327854c8 1389 * _opp_set_availability() - helper to set the availability of an opp
1390 * @dev: device for which we do this operation
1391 * @freq: OPP frequency to modify availability
1392 * @availability_req: availability status requested for this opp
1393 *
1394 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
1395 * share a common logic which is isolated here.
1396 *
984f16c8 1397 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
e1a2d49c 1398 * copy operation, returns 0 if no modification was done OR modification was
1399 * successful.
1400 *
1401 * Locking: The internal device_opp and opp structures are RCU protected.
1402 * Hence this function internally uses RCU updater strategy with mutex locks to
1403 * keep the integrity of the internal data structures. Callers should ensure
1404 * that this function is *NOT* called under RCU protection or in contexts where
1405 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1406 */
1407static int _opp_set_availability(struct device *dev, unsigned long freq,
1408 bool availability_req)
e1f60b29 1409{
29df0ee1 1410 struct device_opp *dev_opp;
47d43ba7 1411 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
1412 int r = 0;
1413
1414 /* keep the node allocated */
47d43ba7 1415 new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
59d84ca8 1416 if (!new_opp)
e1f60b29 1417 return -ENOMEM;
1418
1419 mutex_lock(&dev_opp_list_lock);
1420
1421 /* Find the device_opp */
327854c8 1422 dev_opp = _find_device_opp(dev);
1423 if (IS_ERR(dev_opp)) {
1424 r = PTR_ERR(dev_opp);
1425 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
1426 goto unlock;
1427 }
1428
1429 /* Do we have the frequency? */
1430 list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
1431 if (tmp_opp->rate == freq) {
1432 opp = tmp_opp;
1433 break;
1434 }
1435 }
1436 if (IS_ERR(opp)) {
1437 r = PTR_ERR(opp);
1438 goto unlock;
1439 }
1440
1441 /* Is update really needed? */
1442 if (opp->available == availability_req)
1443 goto unlock;
1444 /* copy the old data over */
1445 *new_opp = *opp;
1446
1447 /* plug in new node */
1448 new_opp->available = availability_req;
1449
1450 list_replace_rcu(&opp->node, &new_opp->node);
1451 mutex_unlock(&dev_opp_list_lock);
327854c8 1452 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
e1f60b29 1453
1454 /* Notify the change of the OPP availability */
1455 if (availability_req)
cd1a068a 1456 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
1457 new_opp);
1458 else
cd1a068a 1459 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
1460 new_opp);
1461
dde8437d 1462 return 0;
1463
1464unlock:
1465 mutex_unlock(&dev_opp_list_lock);
1466 kfree(new_opp);
1467 return r;
1468}
1469
1470/**
5d4879cd 1471 * dev_pm_opp_enable() - Enable a specific OPP
1472 * @dev: device for which we do this operation
1473 * @freq: OPP frequency to enable
1474 *
1475 * Enables a provided opp. If the operation is valid, this returns 0, else the
1476 * corresponding error value. It is meant to be used by users to make an OPP
5d4879cd 1477 * available again after it was temporarily made unavailable with dev_pm_opp_disable.
1478 *
1479 * Locking: The internal device_opp and opp structures are RCU protected.
1480 * Hence this function indirectly uses RCU and mutex locks to keep the
1481 * integrity of the internal data structures. Callers should ensure that
1482 * this function is *NOT* called under RCU protection or in contexts where
1483 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1484 *
1485 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
e1a2d49c 1486 * copy operation, returns 0 if no modification was done OR modification was
984f16c8 1487 * successful.
e1f60b29 1488 */
5d4879cd 1489int dev_pm_opp_enable(struct device *dev, unsigned long freq)
e1f60b29 1490{
327854c8 1491 return _opp_set_availability(dev, freq, true);
e1f60b29 1492}
5d4879cd 1493EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
1494
1495/**
5d4879cd 1496 * dev_pm_opp_disable() - Disable a specific OPP
1497 * @dev: device for which we do this operation
1498 * @freq: OPP frequency to disable
1499 *
1500 * Disables a provided opp. If the operation is valid, this returns
1501 * 0, else the corresponding error value. It is meant to be a temporary
1502 * control by users to make this OPP not available until the circumstances are
5d4879cd 1503 * right to make it available again (with a call to dev_pm_opp_enable).
1504 *
1505 * Locking: The internal device_opp and opp structures are RCU protected.
1506 * Hence this function indirectly uses RCU and mutex locks to keep the
1507 * integrity of the internal data structures. Callers should ensure that
1508 * this function is *NOT* called under RCU protection or in contexts where
1509 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1510 *
1511 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
e1a2d49c 1512 * copy operation, returns 0 if no modification was done OR modification was
984f16c8 1513 * successful.
e1f60b29 1514 */
5d4879cd 1515int dev_pm_opp_disable(struct device *dev, unsigned long freq)
e1f60b29 1516{
327854c8 1517 return _opp_set_availability(dev, freq, false);
e1f60b29 1518}
5d4879cd 1519EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
e1f60b29 1520
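/*
 * Illustrative sketch (not part of this file): enable/disable are meant for
 * runtime policy, e.g. a (hypothetical) thermal handler temporarily dropping
 * the fastest OPP. Both helpers take the opp list mutex and do RCU updates
 * internally, so they must be called from sleepable context and never from
 * inside an rcu_read_lock() section:
 *
 *	if (too_hot)
 *		ret = dev_pm_opp_disable(dev, 1200000000);
 *	else
 *		ret = dev_pm_opp_enable(dev, 1200000000);
 *
 * where 1200000000 Hz is an assumed frequency that must match an existing
 * OPP exactly.
 */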
03ca370f 1521/**
5d4879cd 1522 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
03ca370f 1523 * @dev: device pointer used to lookup device OPPs.
1524 *
1525 * Return: pointer to notifier head if found, otherwise -ENODEV or
1526 * -EINVAL based on type of error casted as pointer. value must be checked
1527 * with IS_ERR to determine valid pointer or error result.
1528 *
1529 * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
1530 * protected pointer. The reason for the same is that the opp pointer which is
1531 * returned will remain valid for use with opp_get_{voltage, freq} only while
1532 * under the locked area. The pointer returned must be used prior to unlocking
1533 * with rcu_read_unlock() to maintain the integrity of the pointer.
03ca370f 1534 */
5d4879cd 1535struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
03ca370f 1536{
327854c8 1537 struct device_opp *dev_opp = _find_device_opp(dev);
1538
1539 if (IS_ERR(dev_opp))
156acb16 1540 return ERR_CAST(dev_opp); /* matching type */
03ca370f 1541
cd1a068a 1542 return &dev_opp->srcu_head;
03ca370f 1543}
4679ec37 1544EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
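/*
 * Illustrative sketch (not part of this file): a consumer such as a devfreq
 * or cpufreq layer can watch the list through the SRCU notifier head
 * returned above. 'my_opp_nb', my_opp_notify() and my_update_work are
 * hypothetical:
 *
 *	static int my_opp_notify(struct notifier_block *nb,
 *				 unsigned long event, void *data)
 *	{
 *		if (event == OPP_EVENT_ENABLE || event == OPP_EVENT_DISABLE)
 *			schedule_work(&my_update_work);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_opp_nb = {
 *		.notifier_call = my_opp_notify,
 *	};
 *
 *	rcu_read_lock();
 *	nh = dev_pm_opp_get_notifier(dev);
 *	rcu_read_unlock();
 *	if (!IS_ERR(nh))
 *		srcu_notifier_chain_register(nh, &my_opp_nb);
 */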
1545
1546#ifdef CONFIG_OF
1547/**
1548 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
1549 * entries
1550 * @dev: device pointer used to lookup device OPPs.
1551 *
737002b5 1552 * Free OPPs created using static entries present in DT.
1553 *
1554 * Locking: The internal device_opp and opp structures are RCU protected.
1555 * Hence this function indirectly uses RCU updater strategy with mutex locks
1556 * to keep the integrity of the internal data structures. Callers should ensure
1557 * that this function is *NOT* called under RCU protection or in contexts where
1558 * mutex cannot be locked.
b496dfbc 1559 */
8f8d37b2 1560void dev_pm_opp_of_remove_table(struct device *dev)
1561{
1562 struct device_opp *dev_opp;
1563 struct dev_pm_opp *opp, *tmp;
1564
1565 /* Hold our list modification lock here */
1566 mutex_lock(&dev_opp_list_lock);
1567
1568 /* Check for existing list for 'dev' */
1569 dev_opp = _find_device_opp(dev);
1570 if (IS_ERR(dev_opp)) {
1571 int error = PTR_ERR(dev_opp);
1572
1573 if (error != -ENODEV)
1574 WARN(1, "%s: dev_opp: %d\n",
1575 IS_ERR_OR_NULL(dev) ?
1576 "Invalid device" : dev_name(dev),
1577 error);
06441658 1578 goto unlock;
1579 }
1580
1581 /* Find if dev_opp manages a single device */
1582 if (list_is_singular(&dev_opp->dev_list)) {
1583 /* Free static OPPs */
1584 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
1585 if (!opp->dynamic)
1586 _opp_remove(dev_opp, opp, true);
1587 }
1588 } else {
1589 _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
1590 }
1591
06441658 1592unlock:
1593 mutex_unlock(&dev_opp_list_lock);
1594}
8f8d37b2 1595EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
737002b5 1596
1840995c 1597/* Returns opp descriptor node for a device, caller must do of_node_put() */
f59d3ee8 1598struct device_node *_of_get_opp_desc_node(struct device *dev)
8d4d4e98 1599{
1600 /*
1601 * TODO: Support for multiple OPP tables.
1602 *
1603 * There should be only ONE phandle present in "operating-points-v2"
1604 * property.
1605 */
8d4d4e98 1606
1840995c 1607 return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
1608}
1609
27465902 1610/* Initializes OPP tables based on new bindings */
f0489a5e 1611static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
27465902 1612{
1840995c 1613 struct device_node *np;
06441658 1614 struct device_opp *dev_opp;
1615 int ret = 0, count = 0;
1616
1617 mutex_lock(&dev_opp_list_lock);
1618
1619 dev_opp = _managed_opp(opp_np);
1620 if (dev_opp) {
1621 /* OPPs are already managed */
1622 if (!_add_list_dev(dev, dev_opp))
1623 ret = -ENOMEM;
4a3a1353 1624 mutex_unlock(&dev_opp_list_lock);
1840995c 1625 return ret;
06441658 1626 }
4a3a1353 1627 mutex_unlock(&dev_opp_list_lock);
06441658 1628
1629 /* We have opp-list node now, iterate over it and add OPPs */
1630 for_each_available_child_of_node(opp_np, np) {
1631 count++;
1632
1633 ret = _opp_add_static_v2(dev, np);
1634 if (ret) {
1635 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
1636 ret);
1f821ed7 1637 goto free_table;
1638 }
1639 }
1640
1641 /* There should be one or more OPPs defined */
1642 if (WARN_ON(!count))
1643 return -ENOENT;
27465902 1644
1645 mutex_lock(&dev_opp_list_lock);
1646
1647 dev_opp = _find_device_opp(dev);
1648 if (WARN_ON(IS_ERR(dev_opp))) {
1649 ret = PTR_ERR(dev_opp);
4a3a1353 1650 mutex_unlock(&dev_opp_list_lock);
1f821ed7 1651 goto free_table;
06441658 1652 }
27465902 1653
1654 dev_opp->np = opp_np;
1655 dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
1656
1657 mutex_unlock(&dev_opp_list_lock);
1658
1659 return 0;
1660
1661free_table:
8f8d37b2 1662 dev_pm_opp_of_remove_table(dev);
1663
1664 return ret;
1665}
1666
1667/* Initializes OPP tables based on old-deprecated bindings */
f0489a5e 1668static int _of_add_opp_table_v1(struct device *dev)
1669{
1670 const struct property *prop;
1671 const __be32 *val;
1672 int nr;
1673
1674 prop = of_find_property(dev->of_node, "operating-points", NULL);
1675 if (!prop)
1676 return -ENODEV;
1677 if (!prop->value)
1678 return -ENODATA;
1679
1680 /*
1681 * Each OPP is a set of tuples consisting of frequency and
1682 * voltage like <freq-kHz vol-uV>.
1683 */
1684 nr = prop->length / sizeof(u32);
1685 if (nr % 2) {
1686 dev_err(dev, "%s: Invalid OPP list\n", __func__);
1687 return -EINVAL;
1688 }
1689
1690 val = prop->value;
1691 while (nr) {
1692 unsigned long freq = be32_to_cpup(val++) * 1000;
1693 unsigned long volt = be32_to_cpup(val++);
1694
b64b9c3f 1695 if (_opp_add_v1(dev, freq, volt, false))
b496dfbc
SG
1696 dev_warn(dev, "%s: Failed to add OPP %ld\n",
1697 __func__, freq);
1698 nr -= 2;
1699 }
1700
1701 return 0;
1702}
1703
1704/**
8f8d37b2 1705 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
1706 * @dev: device pointer used to lookup device OPPs.
1707 *
27465902 1708 * Register the initial OPP table with the OPP library for given device.
1709 *
1710 * Locking: The internal device_opp and opp structures are RCU protected.
1711 * Hence this function indirectly uses RCU updater strategy with mutex locks
1712 * to keep the integrity of the internal data structures. Callers should ensure
1713 * that this function is *NOT* called under RCU protection or in contexts where
1714 * mutex cannot be locked.
1715 *
1716 * Return:
1717 * 0 On success OR
1718 * Duplicate OPPs (both freq and volt are same) and opp->available
1719 * -EEXIST Freq are same and volt are different OR
1720 * Duplicate OPPs (both freq and volt are same) and !opp->available
1721 * -ENOMEM Memory allocation failure
1722 * -ENODEV when 'operating-points' property is not found or is invalid data
1723 * in device node.
1724 * -ENODATA when empty 'operating-points' property is found
1725 * -EINVAL when invalid entries are found in opp-v2 table
129eec55 1726 */
8f8d37b2 1727int dev_pm_opp_of_add_table(struct device *dev)
129eec55 1728{
1729 struct device_node *opp_np;
1730 int ret;
1731
1732 /*
1733 * OPPs have two version of bindings now. The older one is deprecated,
1734 * try for the new binding first.
1735 */
1736 opp_np = _of_get_opp_desc_node(dev);
1737 if (!opp_np) {
1738 /*
1739 * Try old-deprecated bindings for backward compatibility with
1740 * older dtbs.
1741 */
f0489a5e 1742 return _of_add_opp_table_v1(dev);
1743 }
1744
f0489a5e 1745 ret = _of_add_opp_table_v2(dev, opp_np);
1840995c 1746 of_node_put(opp_np);
8d4d4e98 1747
1748 return ret;
1749}
8f8d37b2 1750EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
b496dfbc 1751#endif
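/*
 * Illustrative sketch (not part of this file): a typical probe path only
 * needs the table-level helpers; the v2/v1 binding detection above is
 * handled internally. The dev_pm_opp_add() fallback with hard-coded values
 * is a hypothetical example for boards that carry no OPP data in DT:
 *
 *	ret = dev_pm_opp_of_add_table(dev);
 *	if (ret == -ENODEV) {
 *		ret = dev_pm_opp_add(dev, 500000000, 1100000);
 *		if (!ret)
 *			ret = dev_pm_opp_add(dev, 1000000000, 1200000);
 *	}
 *	if (ret)
 *		return ret;
 *
 * On teardown dev_pm_opp_of_remove_table() drops the static entries, while
 * OPPs added dynamically with dev_pm_opp_add() are removed with
 * dev_pm_opp_remove().
 */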