PM / OPP: Use snprintf() instead of sprintf()
[deliverable/linux.git] drivers/base/power/opp/core.c
e1f60b29
NM
1/*
2 * Generic OPP Interface
3 *
4 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
5 * Nishanth Menon
6 * Romit Dasgupta
7 * Kevin Hilman
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
d6d2a528
VK
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
e1f60b29
NM
16#include <linux/errno.h>
17#include <linux/err.h>
e1f60b29 18#include <linux/slab.h>
51990e82 19#include <linux/device.h>
b496dfbc 20#include <linux/of.h>
80126ce7 21#include <linux/export.h>
e1f60b29 22
f59d3ee8 23#include "opp.h"
e1f60b29
NM
24
25/*
26 * The root of the list of all devices. All device_opp structures branch off
27 * from here, with each device_opp containing the list of opp it supports in
28 * various states of availability.
29 */
30static LIST_HEAD(dev_opp_list);
31/* Lock to allow exclusive modification to the device and opp lists */
87b4115d 32DEFINE_MUTEX(dev_opp_list_lock);
e1f60b29 33
b02ded24
DT
34#define opp_rcu_lockdep_assert() \
35do { \
f78f5b90
PM
36 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
37 !lockdep_is_held(&dev_opp_list_lock), \
b02ded24
DT
38 "Missing rcu_read_lock() or " \
39 "dev_opp_list_lock protection"); \
40} while (0)
41
06441658
VK
42static struct device_list_opp *_find_list_dev(const struct device *dev,
43 struct device_opp *dev_opp)
44{
45 struct device_list_opp *list_dev;
46
47 list_for_each_entry(list_dev, &dev_opp->dev_list, node)
48 if (list_dev->dev == dev)
49 return list_dev;
50
51 return NULL;
52}
53
54static struct device_opp *_managed_opp(const struct device_node *np)
55{
56 struct device_opp *dev_opp;
57
58 list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
59 if (dev_opp->np == np) {
60 /*
61 * Multiple devices can point to the same OPP table and
62 * so will have same node-pointer, np.
63 *
64 * But the OPPs will be considered as shared only if the
65 * OPP table contains a "opp-shared" property.
66 */
67 return dev_opp->shared_opp ? dev_opp : NULL;
68 }
69 }
70
71 return NULL;
72}
73
e1f60b29 74/**
327854c8 75 * _find_device_opp() - find device_opp struct using device pointer
e1f60b29
NM
76 * @dev: device pointer used to lookup device OPPs
77 *
78 * Search the list of device OPPs for one containing the matching device. Does an
79 * RCU reader operation to grab the pointer needed.
80 *
984f16c8 81 * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
e1f60b29
NM
82 * -EINVAL based on type of error.
83 *
0597e818
VK
84 * Locking: For readers, this function must be called under rcu_read_lock().
85 * device_opp is a RCU protected pointer, which means that device_opp is valid
86 * as long as we are under RCU lock.
87 *
88 * For Writers, this function must be called with dev_opp_list_lock held.
e1f60b29 89 */
f59d3ee8 90struct device_opp *_find_device_opp(struct device *dev)
e1f60b29 91{
06441658 92 struct device_opp *dev_opp;
e1f60b29 93
0597e818
VK
94 opp_rcu_lockdep_assert();
95
50a3cb04 96 if (IS_ERR_OR_NULL(dev)) {
e1f60b29
NM
97 pr_err("%s: Invalid parameters\n", __func__);
98 return ERR_PTR(-EINVAL);
99 }
100
06441658
VK
101 list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
102 if (_find_list_dev(dev, dev_opp))
103 return dev_opp;
e1f60b29 104
06441658 105 return ERR_PTR(-ENODEV);
e1f60b29
NM
106}
107
108/**
d6d00742 109 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
e1f60b29
NM
110 * @opp: opp for which voltage has to be returned for
111 *
984f16c8 112 * Return: voltage in micro volt corresponding to the opp, else
e1f60b29
NM
113 * return 0
114 *
115 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
116 * protected pointer. This means that opp which could have been fetched by
117 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
118 * under RCU lock. The pointer returned by the opp_find_freq family must be
119 * used in the same section as the usage of this function with the pointer
120 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
121 * pointer.
122 */
47d43ba7 123unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
e1f60b29 124{
47d43ba7 125 struct dev_pm_opp *tmp_opp;
e1f60b29
NM
126 unsigned long v = 0;
127
04bf1c7f
KK
128 opp_rcu_lockdep_assert();
129
e1f60b29 130 tmp_opp = rcu_dereference(opp);
d6d00742 131 if (IS_ERR_OR_NULL(tmp_opp))
e1f60b29
NM
132 pr_err("%s: Invalid parameters\n", __func__);
133 else
134 v = tmp_opp->u_volt;
135
136 return v;
137}
5d4879cd 138EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
e1f60b29
NM
139
140/**
5d4879cd 141 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
e1f60b29
NM
142 * @opp: opp for which frequency has to be returned for
143 *
984f16c8 144 * Return: frequency in hertz corresponding to the opp, else
e1f60b29
NM
145 * return 0
146 *
147 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
148 * protected pointer. This means that opp which could have been fetched by
149 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
150 * under RCU lock. The pointer returned by the opp_find_freq family must be
151 * used in the same section as the usage of this function with the pointer
152 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
153 * pointer.
154 */
47d43ba7 155unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
e1f60b29 156{
47d43ba7 157 struct dev_pm_opp *tmp_opp;
e1f60b29
NM
158 unsigned long f = 0;
159
04bf1c7f
KK
160 opp_rcu_lockdep_assert();
161
e1f60b29 162 tmp_opp = rcu_dereference(opp);
50a3cb04 163 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
e1f60b29
NM
164 pr_err("%s: Invalid parameters\n", __func__);
165 else
166 f = tmp_opp->rate;
167
168 return f;
169}
5d4879cd 170EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
e1f60b29 171
19445b25
BZ
172/**
173 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
174 * @opp: opp for which turbo mode is being verified
175 *
176 * Turbo OPPs are not for normal use, and can be enabled (under certain
177 * conditions) for short durations to finish high-throughput work
178 * quickly. Running on them for longer times may overheat the chip.
179 *
180 * Return: true if opp is turbo opp, else false.
181 *
182 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
183 * protected pointer. This means that opp which could have been fetched by
184 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
185 * under RCU lock. The pointer returned by the opp_find_freq family must be
186 * used in the same section as the usage of this function with the pointer
187 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
188 * pointer.
189 */
190bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
191{
192 struct dev_pm_opp *tmp_opp;
193
194 opp_rcu_lockdep_assert();
195
196 tmp_opp = rcu_dereference(opp);
197 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
198 pr_err("%s: Invalid parameters\n", __func__);
199 return false;
200 }
201
202 return tmp_opp->turbo;
203}
204EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
205
3ca9bb33
VK
206/**
207 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
208 * @dev: device for which we do this operation
209 *
210 * Return: This function returns the max clock latency in nanoseconds.
211 *
212 * Locking: This function takes rcu_read_lock().
213 */
214unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
215{
216 struct device_opp *dev_opp;
217 unsigned long clock_latency_ns;
218
219 rcu_read_lock();
220
221 dev_opp = _find_device_opp(dev);
222 if (IS_ERR(dev_opp))
223 clock_latency_ns = 0;
224 else
225 clock_latency_ns = dev_opp->clock_latency_ns_max;
226
227 rcu_read_unlock();
228 return clock_latency_ns;
229}
230EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
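/*
 * Illustrative sketch (not part of the original file): a cpufreq-style driver
 * could report the max clock latency as its transition latency. 'cpu_dev' and
 * 'policy' are hypothetical names used only for this example.
 *
 *	policy->cpuinfo.transition_latency =
 *		dev_pm_opp_get_max_clock_latency(cpu_dev);
 */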
231
4eafbd15
BZ
232/**
233 * dev_pm_opp_get_suspend_opp() - Get suspend opp
234 * @dev: device for which we do this operation
235 *
236 * Return: This function returns pointer to the suspend opp if it is
1b2b90cb 237 * defined and available, otherwise it returns NULL.
4eafbd15
BZ
238 *
239 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
240 * protected pointer. The reason for the same is that the opp pointer which is
241 * returned will remain valid for use with opp_get_{voltage, freq} only while
242 * under the locked area. The pointer returned must be used prior to unlocking
243 * with rcu_read_unlock() to maintain the integrity of the pointer.
244 */
245struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
246{
247 struct device_opp *dev_opp;
4eafbd15
BZ
248
249 opp_rcu_lockdep_assert();
250
251 dev_opp = _find_device_opp(dev);
1b2b90cb
VK
252 if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
253 !dev_opp->suspend_opp->available)
254 return NULL;
4eafbd15 255
1b2b90cb 256 return dev_opp->suspend_opp;
4eafbd15
BZ
257}
258EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
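/*
 * Illustrative sketch (not part of the original file): reading the suspend
 * frequency under RCU, as the locking comment above requires. 'dev' is a
 * hypothetical device pointer.
 *
 *	struct dev_pm_opp *suspend_opp;
 *	unsigned long suspend_freq = 0;
 *
 *	rcu_read_lock();
 *	suspend_opp = dev_pm_opp_get_suspend_opp(dev);
 *	if (suspend_opp)
 *		suspend_freq = dev_pm_opp_get_freq(suspend_opp);
 *	rcu_read_unlock();
 */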
259
e1f60b29 260/**
5d4879cd 261 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
e1f60b29
NM
262 * @dev: device for which we do this operation
263 *
984f16c8 264 * Return: This function returns the number of available opps if there are any,
e1f60b29
NM
265 * else returns 0 if none or the corresponding error value.
266 *
b4718c02 267 * Locking: This function takes rcu_read_lock().
e1f60b29 268 */
5d4879cd 269int dev_pm_opp_get_opp_count(struct device *dev)
e1f60b29
NM
270{
271 struct device_opp *dev_opp;
47d43ba7 272 struct dev_pm_opp *temp_opp;
e1f60b29
NM
273 int count = 0;
274
b4718c02 275 rcu_read_lock();
b02ded24 276
327854c8 277 dev_opp = _find_device_opp(dev);
e1f60b29 278 if (IS_ERR(dev_opp)) {
b4718c02
DT
279 count = PTR_ERR(dev_opp);
280 dev_err(dev, "%s: device OPP not found (%d)\n",
281 __func__, count);
282 goto out_unlock;
e1f60b29
NM
283 }
284
285 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
286 if (temp_opp->available)
287 count++;
288 }
289
b4718c02
DT
290out_unlock:
291 rcu_read_unlock();
e1f60b29
NM
292 return count;
293}
5d4879cd 294EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
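/*
 * Illustrative sketch (not part of the original file): sizing a frequency
 * table from the OPP count. Names are hypothetical; note that the count may
 * change afterwards unless the caller prevents concurrent OPP updates.
 *
 *	int count = dev_pm_opp_get_opp_count(dev);
 *
 *	if (count <= 0)
 *		return count ? count : -ENODEV;
 *	freq_table = kcalloc(count + 1, sizeof(*freq_table), GFP_KERNEL);
 *	if (!freq_table)
 *		return -ENOMEM;
 */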
e1f60b29
NM
295
296/**
5d4879cd 297 * dev_pm_opp_find_freq_exact() - search for an exact frequency
e1f60b29
NM
298 * @dev: device for which we do this operation
299 * @freq: frequency to search for
7ae49618 300 * @available: true/false - match for available opp
e1f60b29 301 *
984f16c8
NM
302 * Return: Searches for exact match in the opp list and returns pointer to the
303 * matching opp if found, else returns ERR_PTR in case of error and should
304 * be handled using IS_ERR. Error return values can be:
0779726c
NM
305 * EINVAL: for bad pointer
306 * ERANGE: no match found for search
307 * ENODEV: if device not found in list of registered devices
e1f60b29
NM
308 *
 309 * Note: available is a modifier for the search. If available=true, then the
 310 * match is for the exact matching frequency which is available in the stored OPP
 311 * table. If false, the match is for the exact frequency which is not available.
312 *
313 * This provides a mechanism to enable an opp which is not available currently
314 * or the opposite as well.
315 *
316 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
317 * protected pointer. The reason for the same is that the opp pointer which is
318 * returned will remain valid for use with opp_get_{voltage, freq} only while
319 * under the locked area. The pointer returned must be used prior to unlocking
320 * with rcu_read_unlock() to maintain the integrity of the pointer.
321 */
47d43ba7
NM
322struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
323 unsigned long freq,
324 bool available)
e1f60b29
NM
325{
326 struct device_opp *dev_opp;
47d43ba7 327 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
e1f60b29 328
b02ded24
DT
329 opp_rcu_lockdep_assert();
330
327854c8 331 dev_opp = _find_device_opp(dev);
e1f60b29
NM
332 if (IS_ERR(dev_opp)) {
333 int r = PTR_ERR(dev_opp);
334 dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
335 return ERR_PTR(r);
336 }
337
338 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
339 if (temp_opp->available == available &&
340 temp_opp->rate == freq) {
341 opp = temp_opp;
342 break;
343 }
344 }
345
346 return opp;
347}
5d4879cd 348EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
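/*
 * Illustrative sketch (not part of the original file): checking whether an
 * exact 800 MHz OPP exists but is currently disabled. Must run under
 * rcu_read_lock(), per the locking rules above.
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_exact(dev, 800000000, false);
 *	if (!IS_ERR(opp))
 *		dev_info(dev, "800 MHz OPP exists but is disabled\n");
 *	rcu_read_unlock();
 */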
e1f60b29
NM
349
350/**
5d4879cd 351 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
e1f60b29
NM
352 * @dev: device for which we do this operation
353 * @freq: Start frequency
354 *
355 * Search for the matching ceil *available* OPP from a starting freq
356 * for a device.
357 *
984f16c8 358 * Return: matching *opp and refreshes *freq accordingly, else returns
0779726c
NM
359 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
360 * values can be:
361 * EINVAL: for bad pointer
362 * ERANGE: no match found for search
363 * ENODEV: if device not found in list of registered devices
e1f60b29
NM
364 *
365 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
366 * protected pointer. The reason for the same is that the opp pointer which is
367 * returned will remain valid for use with opp_get_{voltage, freq} only while
368 * under the locked area. The pointer returned must be used prior to unlocking
369 * with rcu_read_unlock() to maintain the integrity of the pointer.
370 */
47d43ba7
NM
371struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
372 unsigned long *freq)
e1f60b29
NM
373{
374 struct device_opp *dev_opp;
47d43ba7 375 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
e1f60b29 376
b02ded24
DT
377 opp_rcu_lockdep_assert();
378
e1f60b29
NM
379 if (!dev || !freq) {
380 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
381 return ERR_PTR(-EINVAL);
382 }
383
327854c8 384 dev_opp = _find_device_opp(dev);
e1f60b29 385 if (IS_ERR(dev_opp))
0779726c 386 return ERR_CAST(dev_opp);
e1f60b29
NM
387
388 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
389 if (temp_opp->available && temp_opp->rate >= *freq) {
390 opp = temp_opp;
391 *freq = opp->rate;
392 break;
393 }
394 }
395
396 return opp;
397}
5d4879cd 398EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
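/*
 * Illustrative sketch (not part of the original file): the typical reader
 * pattern described in the locking comments - find an OPP and read its
 * voltage within a single RCU read-side critical section.
 *
 *	unsigned long freq = target_freq, volt;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *	if (IS_ERR(opp)) {
 *		rcu_read_unlock();
 *		return PTR_ERR(opp);
 *	}
 *	volt = dev_pm_opp_get_voltage(opp);
 *	rcu_read_unlock();
 *
 * after which 'volt' and 'freq' can be used to program the regulator and clock.
 */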
e1f60b29
NM
399
400/**
5d4879cd 401 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
e1f60b29
NM
402 * @dev: device for which we do this operation
403 * @freq: Start frequency
404 *
405 * Search for the matching floor *available* OPP from a starting freq
406 * for a device.
407 *
984f16c8 408 * Return: matching *opp and refreshes *freq accordingly, else returns
0779726c
NM
409 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
410 * values can be:
411 * EINVAL: for bad pointer
412 * ERANGE: no match found for search
413 * ENODEV: if device not found in list of registered devices
e1f60b29
NM
414 *
415 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
416 * protected pointer. The reason for the same is that the opp pointer which is
417 * returned will remain valid for use with opp_get_{voltage, freq} only while
418 * under the locked area. The pointer returned must be used prior to unlocking
419 * with rcu_read_unlock() to maintain the integrity of the pointer.
420 */
47d43ba7
NM
421struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
422 unsigned long *freq)
e1f60b29
NM
423{
424 struct device_opp *dev_opp;
47d43ba7 425 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
e1f60b29 426
b02ded24
DT
427 opp_rcu_lockdep_assert();
428
e1f60b29
NM
429 if (!dev || !freq) {
430 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
431 return ERR_PTR(-EINVAL);
432 }
433
327854c8 434 dev_opp = _find_device_opp(dev);
e1f60b29 435 if (IS_ERR(dev_opp))
0779726c 436 return ERR_CAST(dev_opp);
e1f60b29
NM
437
438 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
439 if (temp_opp->available) {
440 /* go to the next node, before choosing prev */
441 if (temp_opp->rate > *freq)
442 break;
443 else
444 opp = temp_opp;
445 }
446 }
447 if (!IS_ERR(opp))
448 *freq = opp->rate;
449
450 return opp;
451}
5d4879cd 452EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
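/*
 * Illustrative sketch (not part of the original file): walking every
 * available OPP in ascending order by repeatedly asking for the ceil of
 * (last frequency + 1). Names are hypothetical.
 *
 *	unsigned long freq = 0;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
 *		pr_info("OPP: %lu Hz, %lu uV\n",
 *			freq, dev_pm_opp_get_voltage(opp));
 *		freq++;
 *	}
 *	rcu_read_unlock();
 */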
e1f60b29 453
06441658
VK
454/* List-dev Helpers */
455static void _kfree_list_dev_rcu(struct rcu_head *head)
456{
457 struct device_list_opp *list_dev;
458
459 list_dev = container_of(head, struct device_list_opp, rcu_head);
460 kfree_rcu(list_dev, rcu_head);
461}
462
463static void _remove_list_dev(struct device_list_opp *list_dev,
464 struct device_opp *dev_opp)
465{
deaa5146 466 opp_debug_unregister(list_dev, dev_opp);
06441658
VK
467 list_del(&list_dev->node);
468 call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
469 _kfree_list_dev_rcu);
470}
471
f59d3ee8
VK
472struct device_list_opp *_add_list_dev(const struct device *dev,
473 struct device_opp *dev_opp)
06441658
VK
474{
475 struct device_list_opp *list_dev;
deaa5146 476 int ret;
06441658
VK
477
478 list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
479 if (!list_dev)
480 return NULL;
481
482 /* Initialize list-dev */
483 list_dev->dev = dev;
484 list_add_rcu(&list_dev->node, &dev_opp->dev_list);
485
deaa5146
VK
486 /* Create debugfs entries for the dev_opp */
487 ret = opp_debug_register(list_dev, dev_opp);
488 if (ret)
489 dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
490 __func__, ret);
491
06441658
VK
492 return list_dev;
493}
494
984f16c8 495/**
aa5f2f85 496 * _add_device_opp() - Find device OPP table or allocate a new one
984f16c8
NM
497 * @dev: device for which we do this operation
498 *
aa5f2f85
VK
 499 * It tries to find an existing table first; if none is found, it
 500 * allocates a new OPP table and returns that.
984f16c8
NM
501 *
502 * Return: valid device_opp pointer if success, else NULL.
503 */
327854c8 504static struct device_opp *_add_device_opp(struct device *dev)
07cce74a
VK
505{
506 struct device_opp *dev_opp;
06441658 507 struct device_list_opp *list_dev;
07cce74a 508
aa5f2f85
VK
509 /* Check for existing list for 'dev' first */
510 dev_opp = _find_device_opp(dev);
511 if (!IS_ERR(dev_opp))
512 return dev_opp;
07cce74a
VK
513
514 /*
 515 * Allocate a new device OPP table. In the infrequent case where a new
 516 * device needs to be added, we pay this penalty.
517 */
518 dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
519 if (!dev_opp)
520 return NULL;
521
06441658
VK
522 INIT_LIST_HEAD(&dev_opp->dev_list);
523
524 list_dev = _add_list_dev(dev, dev_opp);
525 if (!list_dev) {
526 kfree(dev_opp);
527 return NULL;
528 }
529
07cce74a
VK
530 srcu_init_notifier_head(&dev_opp->srcu_head);
531 INIT_LIST_HEAD(&dev_opp->opp_list);
532
533 /* Secure the device list modification */
534 list_add_rcu(&dev_opp->node, &dev_opp_list);
535 return dev_opp;
536}
537
984f16c8 538/**
737002b5
VK
539 * _kfree_device_rcu() - Free device_opp RCU handler
540 * @head: RCU head
984f16c8 541 */
737002b5 542static void _kfree_device_rcu(struct rcu_head *head)
e1f60b29 543{
737002b5 544 struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
6ce4184d 545
737002b5 546 kfree_rcu(device_opp, rcu_head);
e1f60b29 547}
38393409
VK
548
549/**
3bac42ca
VK
550 * _remove_device_opp() - Removes a device OPP table
551 * @dev_opp: device OPP table to be removed.
38393409 552 *
3bac42ca 553 * Removes/frees the device OPP table if it doesn't contain any OPPs.
38393409 554 */
3bac42ca 555static void _remove_device_opp(struct device_opp *dev_opp)
38393409 556{
06441658
VK
557 struct device_list_opp *list_dev;
558
3bac42ca
VK
559 if (!list_empty(&dev_opp->opp_list))
560 return;
561
7de36b0a
VK
562 if (dev_opp->supported_hw)
563 return;
564
01fb4d3c
VK
565 if (dev_opp->prop_name)
566 return;
567
06441658
VK
568 list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
569 node);
570
571 _remove_list_dev(list_dev, dev_opp);
572
573 /* dev_list must be empty now */
574 WARN_ON(!list_empty(&dev_opp->dev_list));
575
3bac42ca
VK
576 list_del_rcu(&dev_opp->node);
577 call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
578 _kfree_device_rcu);
38393409 579}
e1f60b29 580
984f16c8
NM
581/**
582 * _kfree_opp_rcu() - Free OPP RCU handler
583 * @head: RCU head
584 */
327854c8 585static void _kfree_opp_rcu(struct rcu_head *head)
129eec55
VK
586{
587 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
588
589 kfree_rcu(opp, rcu_head);
590}
591
984f16c8
NM
592/**
593 * _opp_remove() - Remove an OPP from a table definition
594 * @dev_opp: points back to the device_opp struct this opp belongs to
595 * @opp: pointer to the OPP to remove
23dacf6d 596 * @notify: OPP_EVENT_REMOVE notification should be sent or not
984f16c8
NM
597 *
598 * This function removes an opp definition from the opp list.
599 *
600 * Locking: The internal device_opp and opp structures are RCU protected.
601 * It is assumed that the caller holds required mutex for an RCU updater
602 * strategy.
603 */
327854c8 604static void _opp_remove(struct device_opp *dev_opp,
23dacf6d 605 struct dev_pm_opp *opp, bool notify)
129eec55
VK
606{
607 /*
608 * Notify the changes in the availability of the operable
609 * frequency/voltage list.
610 */
23dacf6d
VK
611 if (notify)
612 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
deaa5146 613 opp_debug_remove_one(opp);
129eec55 614 list_del_rcu(&opp->node);
327854c8 615 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
129eec55 616
3bac42ca 617 _remove_device_opp(dev_opp);
129eec55
VK
618}
619
620/**
621 * dev_pm_opp_remove() - Remove an OPP from OPP list
622 * @dev: device for which we do this operation
623 * @freq: OPP to remove with matching 'freq'
624 *
625 * This function removes an opp from the opp list.
984f16c8
NM
626 *
627 * Locking: The internal device_opp and opp structures are RCU protected.
628 * Hence this function internally uses RCU updater strategy with mutex locks
629 * to keep the integrity of the internal data structures. Callers should ensure
630 * that this function is *NOT* called under RCU protection or in contexts where
631 * mutex cannot be locked.
129eec55
VK
632 */
633void dev_pm_opp_remove(struct device *dev, unsigned long freq)
634{
635 struct dev_pm_opp *opp;
636 struct device_opp *dev_opp;
637 bool found = false;
638
639 /* Hold our list modification lock here */
640 mutex_lock(&dev_opp_list_lock);
641
327854c8 642 dev_opp = _find_device_opp(dev);
129eec55
VK
643 if (IS_ERR(dev_opp))
644 goto unlock;
645
646 list_for_each_entry(opp, &dev_opp->opp_list, node) {
647 if (opp->rate == freq) {
648 found = true;
649 break;
650 }
651 }
652
653 if (!found) {
654 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
655 __func__, freq);
656 goto unlock;
657 }
658
23dacf6d 659 _opp_remove(dev_opp, opp, true);
129eec55
VK
660unlock:
661 mutex_unlock(&dev_opp_list_lock);
662}
663EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
664
23dacf6d
VK
665static struct dev_pm_opp *_allocate_opp(struct device *dev,
666 struct device_opp **dev_opp)
e1f60b29 667{
23dacf6d 668 struct dev_pm_opp *opp;
e1f60b29 669
23dacf6d
VK
670 /* allocate new OPP node */
671 opp = kzalloc(sizeof(*opp), GFP_KERNEL);
672 if (!opp)
673 return NULL;
e1f60b29 674
23dacf6d 675 INIT_LIST_HEAD(&opp->node);
e1f60b29 676
23dacf6d
VK
677 *dev_opp = _add_device_opp(dev);
678 if (!*dev_opp) {
679 kfree(opp);
680 return NULL;
681 }
682
683 return opp;
684}
685
06441658
VK
686static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
687 struct device_opp *dev_opp)
23dacf6d
VK
688{
689 struct dev_pm_opp *opp;
690 struct list_head *head = &dev_opp->opp_list;
deaa5146 691 int ret;
23dacf6d
VK
692
693 /*
694 * Insert new OPP in order of increasing frequency and discard if
695 * already present.
696 *
 697 * Need to use &dev_opp->opp_list in the condition part of the 'for'
 698 * loop; don't replace it with head, otherwise it will become an infinite
 699 * loop.
700 */
701 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
702 if (new_opp->rate > opp->rate) {
703 head = &opp->node;
704 continue;
705 }
706
707 if (new_opp->rate < opp->rate)
708 break;
709
710 /* Duplicate OPPs */
06441658 711 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
23dacf6d
VK
712 __func__, opp->rate, opp->u_volt, opp->available,
713 new_opp->rate, new_opp->u_volt, new_opp->available);
714
715 return opp->available && new_opp->u_volt == opp->u_volt ?
716 0 : -EEXIST;
717 }
718
719 new_opp->dev_opp = dev_opp;
720 list_add_rcu(&new_opp->node, head);
721
deaa5146
VK
722 ret = opp_debug_create_one(new_opp, dev_opp);
723 if (ret)
724 dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
725 __func__, ret);
726
23dacf6d
VK
727 return 0;
728}
729
984f16c8 730/**
b64b9c3f 731 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
984f16c8
NM
732 * @dev: device for which we do this operation
733 * @freq: Frequency in Hz for this OPP
734 * @u_volt: Voltage in uVolts for this OPP
735 * @dynamic: Dynamically added OPPs.
736 *
737 * This function adds an opp definition to the opp list and returns status.
738 * The opp is made available by default and it can be controlled using
739 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
740 *
8f8d37b2
VK
741 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
742 * and freed by dev_pm_opp_of_remove_table.
984f16c8
NM
743 *
744 * Locking: The internal device_opp and opp structures are RCU protected.
745 * Hence this function internally uses RCU updater strategy with mutex locks
746 * to keep the integrity of the internal data structures. Callers should ensure
747 * that this function is *NOT* called under RCU protection or in contexts where
748 * mutex cannot be locked.
749 *
750 * Return:
751 * 0 On success OR
752 * Duplicate OPPs (both freq and volt are same) and opp->available
753 * -EEXIST Freq are same and volt are different OR
754 * Duplicate OPPs (both freq and volt are same) and !opp->available
755 * -ENOMEM Memory allocation failure
756 */
b64b9c3f
VK
757static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
758 bool dynamic)
e1f60b29 759{
aa5f2f85 760 struct device_opp *dev_opp;
23dacf6d 761 struct dev_pm_opp *new_opp;
6ce4184d 762 int ret;
e1f60b29 763
e1f60b29
NM
764 /* Hold our list modification lock here */
765 mutex_lock(&dev_opp_list_lock);
766
23dacf6d
VK
767 new_opp = _allocate_opp(dev, &dev_opp);
768 if (!new_opp) {
769 ret = -ENOMEM;
770 goto unlock;
771 }
772
a7470db6 773 /* populate the opp table */
a7470db6
VK
774 new_opp->rate = freq;
775 new_opp->u_volt = u_volt;
776 new_opp->available = true;
23dacf6d 777 new_opp->dynamic = dynamic;
a7470db6 778
06441658 779 ret = _opp_add(dev, new_opp, dev_opp);
23dacf6d 780 if (ret)
6ce4184d 781 goto free_opp;
64ce8545 782
e1f60b29
NM
783 mutex_unlock(&dev_opp_list_lock);
784
03ca370f
MH
785 /*
786 * Notify the changes in the availability of the operable
787 * frequency/voltage list.
788 */
cd1a068a 789 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
e1f60b29 790 return 0;
6ce4184d
VK
791
792free_opp:
23dacf6d
VK
793 _opp_remove(dev_opp, new_opp, false);
794unlock:
6ce4184d 795 mutex_unlock(&dev_opp_list_lock);
6ce4184d 796 return ret;
e1f60b29 797}
38393409 798
27465902 799/* TODO: Support multiple regulators */
01fb4d3c
VK
800static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
801 struct device_opp *dev_opp)
27465902
VK
802{
803 u32 microvolt[3] = {0};
ad623c31 804 u32 val;
27465902 805 int count, ret;
01fb4d3c
VK
806 struct property *prop = NULL;
807 char name[NAME_MAX];
808
809 /* Search for "opp-microvolt-<name>" */
810 if (dev_opp->prop_name) {
5ff24d60
VK
811 snprintf(name, sizeof(name), "opp-microvolt-%s",
812 dev_opp->prop_name);
01fb4d3c
VK
813 prop = of_find_property(opp->np, name, NULL);
814 }
815
816 if (!prop) {
817 /* Search for "opp-microvolt" */
fd8d8e63 818 sprintf(name, "opp-microvolt");
01fb4d3c 819 prop = of_find_property(opp->np, name, NULL);
27465902 820
01fb4d3c
VK
821 /* Missing property isn't a problem, but an invalid entry is */
822 if (!prop)
823 return 0;
824 }
27465902 825
01fb4d3c 826 count = of_property_count_u32_elems(opp->np, name);
680168a5 827 if (count < 0) {
01fb4d3c
VK
828 dev_err(dev, "%s: Invalid %s property (%d)\n",
829 __func__, name, count);
680168a5
VK
830 return count;
831 }
832
27465902
VK
833 /* There can be one or three elements here */
834 if (count != 1 && count != 3) {
01fb4d3c
VK
835 dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
836 __func__, name, count);
27465902
VK
837 return -EINVAL;
838 }
839
01fb4d3c 840 ret = of_property_read_u32_array(opp->np, name, microvolt, count);
27465902 841 if (ret) {
01fb4d3c 842 dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
27465902
VK
843 return -EINVAL;
844 }
845
846 opp->u_volt = microvolt[0];
847 opp->u_volt_min = microvolt[1];
848 opp->u_volt_max = microvolt[2];
849
01fb4d3c
VK
850 /* Search for "opp-microamp-<name>" */
851 prop = NULL;
852 if (dev_opp->prop_name) {
5ff24d60
VK
853 snprintf(name, sizeof(name), "opp-microamp-%s",
854 dev_opp->prop_name);
01fb4d3c
VK
855 prop = of_find_property(opp->np, name, NULL);
856 }
857
858 if (!prop) {
859 /* Search for "opp-microamp" */
fd8d8e63 860 sprintf(name, "opp-microamp");
01fb4d3c
VK
861 prop = of_find_property(opp->np, name, NULL);
862 }
863
864 if (prop && !of_property_read_u32(opp->np, name, &val))
ad623c31
VK
865 opp->u_amp = val;
866
27465902
VK
867 return 0;
868}
869
7de36b0a
VK
870/**
871 * dev_pm_opp_set_supported_hw() - Set supported platforms
872 * @dev: Device for which supported-hw has to be set.
873 * @versions: Array of hierarchy of versions to match.
874 * @count: Number of elements in the array.
875 *
876 * This is required only for the V2 bindings, and it enables a platform to
 877 * specify the hierarchy of versions it supports. The OPP layer will then enable
878 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
879 * property.
880 *
881 * Locking: The internal device_opp and opp structures are RCU protected.
882 * Hence this function internally uses RCU updater strategy with mutex locks
883 * to keep the integrity of the internal data structures. Callers should ensure
884 * that this function is *NOT* called under RCU protection or in contexts where
885 * mutex cannot be locked.
886 */
887int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
888 unsigned int count)
889{
890 struct device_opp *dev_opp;
891 int ret = 0;
892
893 /* Hold our list modification lock here */
894 mutex_lock(&dev_opp_list_lock);
895
896 dev_opp = _add_device_opp(dev);
897 if (!dev_opp) {
898 ret = -ENOMEM;
899 goto unlock;
900 }
901
902 /* Make sure there are no concurrent readers while updating dev_opp */
903 WARN_ON(!list_empty(&dev_opp->opp_list));
904
905 /* Do we already have a version hierarchy associated with dev_opp? */
906 if (dev_opp->supported_hw) {
907 dev_err(dev, "%s: Already have supported hardware list\n",
908 __func__);
909 ret = -EBUSY;
910 goto err;
911 }
912
913 dev_opp->supported_hw = kmemdup(versions, count * sizeof(*versions),
914 GFP_KERNEL);
915 if (!dev_opp->supported_hw) {
916 ret = -ENOMEM;
917 goto err;
918 }
919
920 dev_opp->supported_hw_count = count;
921 mutex_unlock(&dev_opp_list_lock);
922 return 0;
923
924err:
925 _remove_device_opp(dev_opp);
926unlock:
927 mutex_unlock(&dev_opp_list_lock);
928
929 return ret;
930}
931EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
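/*
 * Illustrative sketch (not part of the original file): a platform driver
 * telling the OPP core which hardware versions it runs on before parsing the
 * DT table, so that only matching 'opp-supported-hw' OPPs get added. The
 * version values are hypothetical.
 *
 *	static const u32 hw_versions[] = { 0x2, 0x1 };
 *	int ret;
 *
 *	ret = dev_pm_opp_set_supported_hw(dev, hw_versions,
 *					  ARRAY_SIZE(hw_versions));
 *	if (ret)
 *		return ret;
 *	ret = dev_pm_opp_of_add_table(dev);
 *	if (ret)
 *		dev_pm_opp_put_supported_hw(dev);
 */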
932
933/**
934 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
935 * @dev: Device for which supported-hw has to be set.
936 *
937 * This is required only for the V2 bindings, and is called for a matching
938 * dev_pm_opp_set_supported_hw(). Until this is called, the device_opp structure
939 * will not be freed.
940 *
941 * Locking: The internal device_opp and opp structures are RCU protected.
942 * Hence this function internally uses RCU updater strategy with mutex locks
943 * to keep the integrity of the internal data structures. Callers should ensure
944 * that this function is *NOT* called under RCU protection or in contexts where
945 * mutex cannot be locked.
946 */
947void dev_pm_opp_put_supported_hw(struct device *dev)
948{
949 struct device_opp *dev_opp;
950
951 /* Hold our list modification lock here */
952 mutex_lock(&dev_opp_list_lock);
953
954 /* Check for existing list for 'dev' first */
955 dev_opp = _find_device_opp(dev);
956 if (IS_ERR(dev_opp)) {
957 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
958 goto unlock;
959 }
960
961 /* Make sure there are no concurrent readers while updating dev_opp */
962 WARN_ON(!list_empty(&dev_opp->opp_list));
963
964 if (!dev_opp->supported_hw) {
965 dev_err(dev, "%s: Doesn't have supported hardware list\n",
966 __func__);
967 goto unlock;
968 }
969
970 kfree(dev_opp->supported_hw);
971 dev_opp->supported_hw = NULL;
972 dev_opp->supported_hw_count = 0;
973
974 /* Try freeing device_opp if this was the last blocking resource */
975 _remove_device_opp(dev_opp);
976
977unlock:
978 mutex_unlock(&dev_opp_list_lock);
979}
980EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
981
01fb4d3c
VK
982/**
983 * dev_pm_opp_set_prop_name() - Set prop-extn name
 984 * @dev: Device for which the prop-name has to be set.
985 * @name: name to postfix to properties.
986 *
987 * This is required only for the V2 bindings, and it enables a platform to
988 * specify the extn to be used for certain property names. The properties to
989 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
990 * should postfix the property name with -<name> while looking for them.
991 *
992 * Locking: The internal device_opp and opp structures are RCU protected.
993 * Hence this function internally uses RCU updater strategy with mutex locks
994 * to keep the integrity of the internal data structures. Callers should ensure
995 * that this function is *NOT* called under RCU protection or in contexts where
996 * mutex cannot be locked.
997 */
998int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
999{
1000 struct device_opp *dev_opp;
1001 int ret = 0;
1002
1003 /* Hold our list modification lock here */
1004 mutex_lock(&dev_opp_list_lock);
1005
1006 dev_opp = _add_device_opp(dev);
1007 if (!dev_opp) {
1008 ret = -ENOMEM;
1009 goto unlock;
1010 }
1011
1012 /* Make sure there are no concurrent readers while updating dev_opp */
1013 WARN_ON(!list_empty(&dev_opp->opp_list));
1014
1015 /* Do we already have a prop-name associated with dev_opp? */
1016 if (dev_opp->prop_name) {
1017 dev_err(dev, "%s: Already have prop-name %s\n", __func__,
1018 dev_opp->prop_name);
1019 ret = -EBUSY;
1020 goto err;
1021 }
1022
1023 dev_opp->prop_name = kstrdup(name, GFP_KERNEL);
1024 if (!dev_opp->prop_name) {
1025 ret = -ENOMEM;
1026 goto err;
1027 }
1028
1029 mutex_unlock(&dev_opp_list_lock);
1030 return 0;
1031
1032err:
1033 _remove_device_opp(dev_opp);
1034unlock:
1035 mutex_unlock(&dev_opp_list_lock);
1036
1037 return ret;
1038}
1039EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
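/*
 * Illustrative sketch (not part of the original file): selecting the "slow"
 * variant of the extended properties, so opp_parse_supplies() above will look
 * for "opp-microvolt-slow" and "opp-microamp-slow" first. The name is
 * hypothetical.
 *
 *	ret = dev_pm_opp_set_prop_name(dev, "slow");
 *	if (ret)
 *		return ret;
 *	ret = dev_pm_opp_of_add_table(dev);
 *	if (ret)
 *		dev_pm_opp_put_prop_name(dev);
 */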
1040
1041/**
1042 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
 1043 * @dev: Device for which the prop-name has to be set.
1044 *
1045 * This is required only for the V2 bindings, and is called for a matching
1046 * dev_pm_opp_set_prop_name(). Until this is called, the device_opp structure
1047 * will not be freed.
1048 *
1049 * Locking: The internal device_opp and opp structures are RCU protected.
1050 * Hence this function internally uses RCU updater strategy with mutex locks
1051 * to keep the integrity of the internal data structures. Callers should ensure
1052 * that this function is *NOT* called under RCU protection or in contexts where
1053 * mutex cannot be locked.
1054 */
1055void dev_pm_opp_put_prop_name(struct device *dev)
1056{
1057 struct device_opp *dev_opp;
1058
1059 /* Hold our list modification lock here */
1060 mutex_lock(&dev_opp_list_lock);
1061
1062 /* Check for existing list for 'dev' first */
1063 dev_opp = _find_device_opp(dev);
1064 if (IS_ERR(dev_opp)) {
1065 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
1066 goto unlock;
1067 }
1068
1069 /* Make sure there are no concurrent readers while updating dev_opp */
1070 WARN_ON(!list_empty(&dev_opp->opp_list));
1071
1072 if (!dev_opp->prop_name) {
1073 dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
1074 goto unlock;
1075 }
1076
1077 kfree(dev_opp->prop_name);
1078 dev_opp->prop_name = NULL;
1079
1080 /* Try freeing device_opp if this was the last blocking resource */
1081 _remove_device_opp(dev_opp);
1082
1083unlock:
1084 mutex_unlock(&dev_opp_list_lock);
1085}
1086EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1087
7de36b0a
VK
1088static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
1089 struct device_node *np)
1090{
1091 unsigned int count = dev_opp->supported_hw_count;
1092 u32 version;
1093 int ret;
1094
1095 if (!dev_opp->supported_hw)
1096 return true;
1097
1098 while (count--) {
1099 ret = of_property_read_u32_index(np, "opp-supported-hw", count,
1100 &version);
1101 if (ret) {
1102 dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
1103 __func__, count, ret);
1104 return false;
1105 }
1106
1107 /* Both of these are bitwise masks of the versions */
1108 if (!(version & dev_opp->supported_hw[count]))
1109 return false;
1110 }
1111
1112 return true;
1113}
1114
27465902
VK
1115/**
1116 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
1117 * @dev: device for which we do this operation
1118 * @np: device node
1119 *
1120 * This function adds an opp definition to the opp list and returns status. The
1121 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
1122 * removed by dev_pm_opp_remove.
1123 *
1124 * Locking: The internal device_opp and opp structures are RCU protected.
1125 * Hence this function internally uses RCU updater strategy with mutex locks
1126 * to keep the integrity of the internal data structures. Callers should ensure
1127 * that this function is *NOT* called under RCU protection or in contexts where
1128 * mutex cannot be locked.
1129 *
1130 * Return:
1131 * 0 On success OR
1132 * Duplicate OPPs (both freq and volt are same) and opp->available
1133 * -EEXIST Freq are same and volt are different OR
1134 * Duplicate OPPs (both freq and volt are same) and !opp->available
1135 * -ENOMEM Memory allocation failure
1136 * -EINVAL Failed parsing the OPP node
1137 */
1138static int _opp_add_static_v2(struct device *dev, struct device_node *np)
1139{
1140 struct device_opp *dev_opp;
1141 struct dev_pm_opp *new_opp;
1142 u64 rate;
68fa9f0a 1143 u32 val;
27465902
VK
1144 int ret;
1145
1146 /* Hold our list modification lock here */
1147 mutex_lock(&dev_opp_list_lock);
1148
1149 new_opp = _allocate_opp(dev, &dev_opp);
1150 if (!new_opp) {
1151 ret = -ENOMEM;
1152 goto unlock;
1153 }
1154
1155 ret = of_property_read_u64(np, "opp-hz", &rate);
1156 if (ret < 0) {
1157 dev_err(dev, "%s: opp-hz not found\n", __func__);
1158 goto free_opp;
1159 }
1160
7de36b0a
VK
1161 /* Check if the OPP supports hardware's hierarchy of versions or not */
1162 if (!_opp_is_supported(dev, dev_opp, np)) {
1163 dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
1164 goto free_opp;
1165 }
1166
27465902
VK
1167 /*
1168 * Rate is defined as an unsigned long in clk API, and so casting
1169 * explicitly to its type. Must be fixed once rate is 64 bit
1170 * guaranteed in clk API.
1171 */
1172 new_opp->rate = (unsigned long)rate;
1173 new_opp->turbo = of_property_read_bool(np, "turbo-mode");
1174
1175 new_opp->np = np;
1176 new_opp->dynamic = false;
1177 new_opp->available = true;
68fa9f0a
VK
1178
1179 if (!of_property_read_u32(np, "clock-latency-ns", &val))
1180 new_opp->clock_latency_ns = val;
27465902 1181
01fb4d3c 1182 ret = opp_parse_supplies(new_opp, dev, dev_opp);
27465902
VK
1183 if (ret)
1184 goto free_opp;
1185
06441658 1186 ret = _opp_add(dev, new_opp, dev_opp);
27465902
VK
1187 if (ret)
1188 goto free_opp;
1189
ad656a6a
VK
1190 /* OPP to select on device suspend */
1191 if (of_property_read_bool(np, "opp-suspend")) {
deaa5146 1192 if (dev_opp->suspend_opp) {
ad656a6a
VK
1193 dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
1194 __func__, dev_opp->suspend_opp->rate,
1195 new_opp->rate);
deaa5146
VK
1196 } else {
1197 new_opp->suspend = true;
ad656a6a 1198 dev_opp->suspend_opp = new_opp;
deaa5146 1199 }
ad656a6a
VK
1200 }
1201
3ca9bb33
VK
1202 if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
1203 dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
1204
27465902
VK
1205 mutex_unlock(&dev_opp_list_lock);
1206
3ca9bb33 1207 pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
27465902 1208 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
3ca9bb33
VK
1209 new_opp->u_volt_min, new_opp->u_volt_max,
1210 new_opp->clock_latency_ns);
27465902
VK
1211
1212 /*
1213 * Notify the changes in the availability of the operable
1214 * frequency/voltage list.
1215 */
1216 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
1217 return 0;
1218
1219free_opp:
1220 _opp_remove(dev_opp, new_opp, false);
1221unlock:
1222 mutex_unlock(&dev_opp_list_lock);
1223 return ret;
1224}
1225
38393409
VK
1226/**
 1227 * dev_pm_opp_add() - Add an OPP table from a table definition
1228 * @dev: device for which we do this operation
1229 * @freq: Frequency in Hz for this OPP
1230 * @u_volt: Voltage in uVolts for this OPP
1231 *
1232 * This function adds an opp definition to the opp list and returns status.
1233 * The opp is made available by default and it can be controlled using
1234 * dev_pm_opp_enable/disable functions.
1235 *
1236 * Locking: The internal device_opp and opp structures are RCU protected.
1237 * Hence this function internally uses RCU updater strategy with mutex locks
1238 * to keep the integrity of the internal data structures. Callers should ensure
1239 * that this function is *NOT* called under RCU protection or in contexts where
1240 * mutex cannot be locked.
1241 *
1242 * Return:
984f16c8 1243 * 0 On success OR
38393409 1244 * Duplicate OPPs (both freq and volt are same) and opp->available
984f16c8 1245 * -EEXIST Freq are same and volt are different OR
38393409 1246 * Duplicate OPPs (both freq and volt are same) and !opp->available
984f16c8 1247 * -ENOMEM Memory allocation failure
38393409
VK
1248 */
1249int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
1250{
b64b9c3f 1251 return _opp_add_v1(dev, freq, u_volt, true);
38393409 1252}
5d4879cd 1253EXPORT_SYMBOL_GPL(dev_pm_opp_add);
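/*
 * Illustrative sketch (not part of the original file): a driver registering a
 * small OPP table from code instead of DT. Frequencies and voltages are made up.
 *
 *	ret = dev_pm_opp_add(dev, 500000000, 900000);
 *	if (!ret)
 *		ret = dev_pm_opp_add(dev, 1000000000, 1100000);
 *	if (ret)
 *		dev_err(dev, "failed to register OPPs: %d\n", ret);
 *
 * i.e. 500 MHz at 900000 uV and 1 GHz at 1100000 uV.
 */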
e1f60b29
NM
1254
1255/**
327854c8 1256 * _opp_set_availability() - helper to set the availability of an opp
e1f60b29
NM
1257 * @dev: device for which we do this operation
1258 * @freq: OPP frequency to modify availability
1259 * @availability_req: availability status requested for this opp
1260 *
 1261 * Set the availability of an OPP with an RCU operation; opp_{enable,disable}
 1262 * share common logic, which is isolated here.
1263 *
984f16c8 1264 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
e1a2d49c 1265 * copy operation, returns 0 if no modification was done OR modification was
e1f60b29
NM
1266 * successful.
1267 *
1268 * Locking: The internal device_opp and opp structures are RCU protected.
1269 * Hence this function internally uses RCU updater strategy with mutex locks to
1270 * keep the integrity of the internal data structures. Callers should ensure
1271 * that this function is *NOT* called under RCU protection or in contexts where
1272 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1273 */
327854c8
NM
1274static int _opp_set_availability(struct device *dev, unsigned long freq,
1275 bool availability_req)
e1f60b29 1276{
29df0ee1 1277 struct device_opp *dev_opp;
47d43ba7 1278 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
e1f60b29
NM
1279 int r = 0;
1280
1281 /* keep the node allocated */
47d43ba7 1282 new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
59d84ca8 1283 if (!new_opp)
e1f60b29 1284 return -ENOMEM;
e1f60b29
NM
1285
1286 mutex_lock(&dev_opp_list_lock);
1287
1288 /* Find the device_opp */
327854c8 1289 dev_opp = _find_device_opp(dev);
e1f60b29
NM
1290 if (IS_ERR(dev_opp)) {
1291 r = PTR_ERR(dev_opp);
1292 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
1293 goto unlock;
1294 }
1295
1296 /* Do we have the frequency? */
1297 list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
1298 if (tmp_opp->rate == freq) {
1299 opp = tmp_opp;
1300 break;
1301 }
1302 }
1303 if (IS_ERR(opp)) {
1304 r = PTR_ERR(opp);
1305 goto unlock;
1306 }
1307
1308 /* Is update really needed? */
1309 if (opp->available == availability_req)
1310 goto unlock;
1311 /* copy the old data over */
1312 *new_opp = *opp;
1313
1314 /* plug in new node */
1315 new_opp->available = availability_req;
1316
1317 list_replace_rcu(&opp->node, &new_opp->node);
1318 mutex_unlock(&dev_opp_list_lock);
327854c8 1319 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
e1f60b29 1320
03ca370f
MH
1321 /* Notify the change of the OPP availability */
1322 if (availability_req)
cd1a068a 1323 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
03ca370f
MH
1324 new_opp);
1325 else
cd1a068a 1326 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
03ca370f
MH
1327 new_opp);
1328
dde8437d 1329 return 0;
e1f60b29
NM
1330
1331unlock:
1332 mutex_unlock(&dev_opp_list_lock);
e1f60b29
NM
1333 kfree(new_opp);
1334 return r;
1335}
1336
1337/**
5d4879cd 1338 * dev_pm_opp_enable() - Enable a specific OPP
e1f60b29
NM
1339 * @dev: device for which we do this operation
1340 * @freq: OPP frequency to enable
1341 *
 1342 * Enables a provided opp. If the operation is valid, this returns 0, else the
 1343 * corresponding error value. It is meant to be used to make an OPP available
5d4879cd 1344 * again after it was temporarily made unavailable with dev_pm_opp_disable.
e1f60b29
NM
1345 *
1346 * Locking: The internal device_opp and opp structures are RCU protected.
1347 * Hence this function indirectly uses RCU and mutex locks to keep the
1348 * integrity of the internal data structures. Callers should ensure that
1349 * this function is *NOT* called under RCU protection or in contexts where
1350 * mutex locking or synchronize_rcu() blocking calls cannot be used.
984f16c8
NM
1351 *
1352 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
e1a2d49c 1353 * copy operation, returns 0 if no modification was done OR modification was
984f16c8 1354 * successful.
e1f60b29 1355 */
5d4879cd 1356int dev_pm_opp_enable(struct device *dev, unsigned long freq)
e1f60b29 1357{
327854c8 1358 return _opp_set_availability(dev, freq, true);
e1f60b29 1359}
5d4879cd 1360EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
e1f60b29
NM
1361
1362/**
5d4879cd 1363 * dev_pm_opp_disable() - Disable a specific OPP
e1f60b29
NM
1364 * @dev: device for which we do this operation
1365 * @freq: OPP frequency to disable
1366 *
1367 * Disables a provided opp. If the operation is valid, this returns
1368 * 0, else the corresponding error value. It is meant to be a temporary
1369 * control by users to make this OPP not available until the circumstances are
5d4879cd 1370 * right to make it available again (with a call to dev_pm_opp_enable).
e1f60b29
NM
1371 *
1372 * Locking: The internal device_opp and opp structures are RCU protected.
1373 * Hence this function indirectly uses RCU and mutex locks to keep the
1374 * integrity of the internal data structures. Callers should ensure that
1375 * this function is *NOT* called under RCU protection or in contexts where
1376 * mutex locking or synchronize_rcu() blocking calls cannot be used.
984f16c8
NM
1377 *
1378 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
e1a2d49c 1379 * copy operation, returns 0 if no modification was done OR modification was
984f16c8 1380 * successful.
e1f60b29 1381 */
5d4879cd 1382int dev_pm_opp_disable(struct device *dev, unsigned long freq)
e1f60b29 1383{
327854c8 1384 return _opp_set_availability(dev, freq, false);
e1f60b29 1385}
5d4879cd 1386EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
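/*
 * Illustrative sketch (not part of the original file): a thermal driver
 * temporarily removing a (hypothetical) 1.2 GHz turbo OPP when a trip point
 * is crossed and restoring it once the device cools down. 'over_temp' is a
 * made-up condition.
 *
 *	if (over_temp)
 *		dev_pm_opp_disable(dev, 1200000000);
 *	else
 *		dev_pm_opp_enable(dev, 1200000000);
 */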
e1f60b29 1387
03ca370f 1388/**
5d4879cd 1389 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
03ca370f 1390 * @dev: device pointer used to lookup device OPPs.
984f16c8
NM
1391 *
1392 * Return: pointer to notifier head if found, otherwise -ENODEV or
 1393 * -EINVAL based on type of error cast as a pointer. The value must be checked
 1394 * with IS_ERR to determine whether it is a valid pointer or an error result.
1395 *
1396 * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
1397 * protected pointer. The reason for the same is that the opp pointer which is
1398 * returned will remain valid for use with opp_get_{voltage, freq} only while
1399 * under the locked area. The pointer returned must be used prior to unlocking
1400 * with rcu_read_unlock() to maintain the integrity of the pointer.
03ca370f 1401 */
5d4879cd 1402struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
03ca370f 1403{
327854c8 1404 struct device_opp *dev_opp = _find_device_opp(dev);
03ca370f
MH
1405
1406 if (IS_ERR(dev_opp))
156acb16 1407 return ERR_CAST(dev_opp); /* matching type */
03ca370f 1408
cd1a068a 1409 return &dev_opp->srcu_head;
03ca370f 1410}
4679ec37 1411EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
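/*
 * Illustrative sketch (not part of the original file): subscribing to OPP
 * add/remove/enable/disable events. The callback, notifier block and work
 * item names are hypothetical; the notifier head is looked up under
 * rcu_read_lock() as required above.
 *
 *	static int my_opp_notify(struct notifier_block *nb, unsigned long event,
 *				 void *data)
 *	{
 *		if (event == OPP_EVENT_ADD || event == OPP_EVENT_ENABLE)
 *			schedule_work(&my_rebuild_table_work);
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_opp_nb = { .notifier_call = my_opp_notify };
 *
 *	rcu_read_lock();
 *	nh = dev_pm_opp_get_notifier(dev);
 *	rcu_read_unlock();
 *	if (!IS_ERR(nh))
 *		srcu_notifier_chain_register(nh, &my_opp_nb);
 */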
b496dfbc
SG
1412
1413#ifdef CONFIG_OF
1414/**
8f8d37b2
VK
1415 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
1416 * entries
b496dfbc
SG
1417 * @dev: device pointer used to lookup device OPPs.
1418 *
737002b5 1419 * Free OPPs created using static entries present in DT.
984f16c8
NM
1420 *
1421 * Locking: The internal device_opp and opp structures are RCU protected.
1422 * Hence this function indirectly uses RCU updater strategy with mutex locks
1423 * to keep the integrity of the internal data structures. Callers should ensure
1424 * that this function is *NOT* called under RCU protection or in contexts where
1425 * mutex cannot be locked.
b496dfbc 1426 */
8f8d37b2 1427void dev_pm_opp_of_remove_table(struct device *dev)
737002b5
VK
1428{
1429 struct device_opp *dev_opp;
1430 struct dev_pm_opp *opp, *tmp;
1431
06441658
VK
1432 /* Hold our list modification lock here */
1433 mutex_lock(&dev_opp_list_lock);
1434
737002b5
VK
1435 /* Check for existing list for 'dev' */
1436 dev_opp = _find_device_opp(dev);
1437 if (IS_ERR(dev_opp)) {
1438 int error = PTR_ERR(dev_opp);
1439
1440 if (error != -ENODEV)
1441 WARN(1, "%s: dev_opp: %d\n",
1442 IS_ERR_OR_NULL(dev) ?
1443 "Invalid device" : dev_name(dev),
1444 error);
06441658 1445 goto unlock;
737002b5
VK
1446 }
1447
06441658
VK
1448 /* Find if dev_opp manages a single device */
1449 if (list_is_singular(&dev_opp->dev_list)) {
1450 /* Free static OPPs */
1451 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
1452 if (!opp->dynamic)
1453 _opp_remove(dev_opp, opp, true);
1454 }
1455 } else {
1456 _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
737002b5
VK
1457 }
1458
06441658 1459unlock:
737002b5
VK
1460 mutex_unlock(&dev_opp_list_lock);
1461}
8f8d37b2 1462EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
737002b5 1463
1840995c 1464/* Returns opp descriptor node for a device, caller must do of_node_put() */
f59d3ee8 1465struct device_node *_of_get_opp_desc_node(struct device *dev)
8d4d4e98 1466{
8d4d4e98
VK
1467 /*
1468 * TODO: Support for multiple OPP tables.
1469 *
1470 * There should be only ONE phandle present in "operating-points-v2"
1471 * property.
1472 */
8d4d4e98 1473
1840995c 1474 return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
8d4d4e98
VK
1475}
1476
27465902 1477/* Initializes OPP tables based on new bindings */
f0489a5e 1478static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
27465902 1479{
1840995c 1480 struct device_node *np;
06441658 1481 struct device_opp *dev_opp;
27465902
VK
1482 int ret = 0, count = 0;
1483
4a3a1353
VK
1484 mutex_lock(&dev_opp_list_lock);
1485
06441658
VK
1486 dev_opp = _managed_opp(opp_np);
1487 if (dev_opp) {
1488 /* OPPs are already managed */
1489 if (!_add_list_dev(dev, dev_opp))
1490 ret = -ENOMEM;
4a3a1353 1491 mutex_unlock(&dev_opp_list_lock);
1840995c 1492 return ret;
06441658 1493 }
4a3a1353 1494 mutex_unlock(&dev_opp_list_lock);
06441658 1495
27465902
VK
 1496 /* We have the opp-list node now; iterate over it and add OPPs */
1497 for_each_available_child_of_node(opp_np, np) {
1498 count++;
1499
1500 ret = _opp_add_static_v2(dev, np);
1501 if (ret) {
1502 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
1503 ret);
1f821ed7 1504 goto free_table;
27465902
VK
1505 }
1506 }
1507
 1508 /* There should be one or more OPPs defined */
1840995c
VK
1509 if (WARN_ON(!count))
1510 return -ENOENT;
27465902 1511
4a3a1353
VK
1512 mutex_lock(&dev_opp_list_lock);
1513
1f821ed7
VK
1514 dev_opp = _find_device_opp(dev);
1515 if (WARN_ON(IS_ERR(dev_opp))) {
1516 ret = PTR_ERR(dev_opp);
4a3a1353 1517 mutex_unlock(&dev_opp_list_lock);
1f821ed7 1518 goto free_table;
06441658 1519 }
27465902 1520
1f821ed7
VK
1521 dev_opp->np = opp_np;
1522 dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
1523
4a3a1353
VK
1524 mutex_unlock(&dev_opp_list_lock);
1525
1f821ed7
VK
1526 return 0;
1527
1528free_table:
8f8d37b2 1529 dev_pm_opp_of_remove_table(dev);
27465902
VK
1530
1531 return ret;
1532}
1533
1534/* Initializes OPP tables based on old-deprecated bindings */
f0489a5e 1535static int _of_add_opp_table_v1(struct device *dev)
b496dfbc
SG
1536{
1537 const struct property *prop;
1538 const __be32 *val;
1539 int nr;
1540
1541 prop = of_find_property(dev->of_node, "operating-points", NULL);
1542 if (!prop)
1543 return -ENODEV;
1544 if (!prop->value)
1545 return -ENODATA;
1546
1547 /*
 1548 * Each OPP is a tuple consisting of a frequency and a
 1549 * voltage, like <freq-kHz vol-uV>.
1550 */
1551 nr = prop->length / sizeof(u32);
1552 if (nr % 2) {
1553 dev_err(dev, "%s: Invalid OPP list\n", __func__);
1554 return -EINVAL;
1555 }
1556
1557 val = prop->value;
1558 while (nr) {
1559 unsigned long freq = be32_to_cpup(val++) * 1000;
1560 unsigned long volt = be32_to_cpup(val++);
1561
b64b9c3f 1562 if (_opp_add_v1(dev, freq, volt, false))
b496dfbc
SG
1563 dev_warn(dev, "%s: Failed to add OPP %ld\n",
1564 __func__, freq);
b496dfbc
SG
1565 nr -= 2;
1566 }
1567
1568 return 0;
1569}
129eec55
VK
1570
1571/**
8f8d37b2 1572 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
129eec55
VK
1573 * @dev: device pointer used to lookup device OPPs.
1574 *
27465902 1575 * Register the initial OPP table with the OPP library for given device.
984f16c8
NM
1576 *
1577 * Locking: The internal device_opp and opp structures are RCU protected.
1578 * Hence this function indirectly uses RCU updater strategy with mutex locks
1579 * to keep the integrity of the internal data structures. Callers should ensure
1580 * that this function is *NOT* called under RCU protection or in contexts where
1581 * mutex cannot be locked.
27465902
VK
1582 *
1583 * Return:
1584 * 0 On success OR
1585 * Duplicate OPPs (both freq and volt are same) and opp->available
1586 * -EEXIST Freq are same and volt are different OR
1587 * Duplicate OPPs (both freq and volt are same) and !opp->available
1588 * -ENOMEM Memory allocation failure
1589 * -ENODEV when 'operating-points' property is not found or is invalid data
1590 * in device node.
1591 * -ENODATA when empty 'operating-points' property is found
1592 * -EINVAL when invalid entries are found in opp-v2 table
129eec55 1593 */
8f8d37b2 1594int dev_pm_opp_of_add_table(struct device *dev)
129eec55 1595{
1840995c
VK
1596 struct device_node *opp_np;
1597 int ret;
27465902
VK
1598
1599 /*
 1600 * OPPs have two versions of bindings now. The older one is deprecated;
 1601 * try the new binding first.
1602 */
1840995c
VK
1603 opp_np = _of_get_opp_desc_node(dev);
1604 if (!opp_np) {
27465902
VK
1605 /*
1606 * Try old-deprecated bindings for backward compatibility with
1607 * older dtbs.
1608 */
f0489a5e 1609 return _of_add_opp_table_v1(dev);
8d4d4e98
VK
1610 }
1611
f0489a5e 1612 ret = _of_add_opp_table_v2(dev, opp_np);
1840995c 1613 of_node_put(opp_np);
8d4d4e98 1614
8d4d4e98
VK
1615 return ret;
1616}
8f8d37b2 1617EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
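/*
 * Illustrative sketch (not part of the original file): typical probe/remove
 * usage of the DT helpers above. 'foo_probe' and 'foo_remove' are
 * hypothetical platform driver callbacks.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret = dev_pm_opp_of_add_table(&pdev->dev);
 *
 *		if (ret)
 *			dev_warn(&pdev->dev, "no OPP table: %d\n", ret);
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		dev_pm_opp_of_remove_table(&pdev->dev);
 *		return 0;
 *	}
 */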
b496dfbc 1618#endif