PM / OPP: Add OPP sharing information to OPP library
[deliverable/linux.git] drivers/base/power/opp.c
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/pm_opp.h>
#include <linux/of.h>
#include <linux/export.h>

/*
 * Internal data structure organization within the OPP library is as
 * follows:
 * dev_opp_list (root)
 *	|- device 1 (represents voltage domain 1)
 *	|	|- opp 1 (availability, freq, voltage)
 *	|	|- opp 2 ..
 *	...	...
 *	|	`- opp n ..
 *	|- device 2 (represents the next voltage domain)
 *	...
 *	`- device m (represents mth voltage domain)
 * device 1, 2.. are represented by dev_opp structure while each opp
 * is represented by the opp structure.
 */

/**
 * struct dev_pm_opp - Generic OPP description structure
 * @node:	opp list node. The nodes are maintained throughout the lifetime
 *		of boot. It is expected that only an optimal set of OPPs is
 *		added to the library by the SoC framework.
 *		RCU usage: opp list is traversed with RCU locks. node
 *		modification is possible at runtime, hence the modifications
 *		are protected by the dev_opp_list_lock for integrity.
 *		IMPORTANT: the opp nodes should be maintained in increasing
 *		order.
 * @dynamic:	true if the OPP was added dynamically, i.e. not created from
 *		static DT entries.
 * @available:	true/false - marks if this OPP is available or not
 * @turbo:	true if turbo (boost) OPP
 * @rate:	Frequency in hertz
 * @u_volt:	Target voltage in microvolts corresponding to this OPP
 * @u_volt_min:	Minimum voltage in microvolts corresponding to this OPP
 * @u_volt_max:	Maximum voltage in microvolts corresponding to this OPP
 * @u_amp:	Maximum current drawn by the device in microamperes
 * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
 *		frequency from any other OPP's frequency.
 * @dev_opp:	points back to the device_opp struct this opp belongs to
 * @rcu_head:	RCU callback head used for deferred freeing
 * @np:		OPP's device node.
 *
 * This structure stores the OPP information for a given device.
 */
struct dev_pm_opp {
	struct list_head node;

	bool available;
	bool dynamic;
	bool turbo;
	unsigned long rate;

	unsigned long u_volt;
	unsigned long u_volt_min;
	unsigned long u_volt_max;
	unsigned long u_amp;
	unsigned long clock_latency_ns;

	struct device_opp *dev_opp;
	struct rcu_head rcu_head;

	struct device_node *np;
};

/**
 * struct device_list_opp - devices managed by 'struct device_opp'
 * @node:	list node
 * @dev:	device to which the struct object belongs
 * @rcu_head:	RCU callback head used for deferred freeing
 *
 * This is an internal data structure maintaining the list of devices that are
 * managed by 'struct device_opp'.
 */
struct device_list_opp {
	struct list_head node;
	const struct device *dev;
	struct rcu_head rcu_head;
};

/**
 * struct device_opp - Device opp structure
 * @node:	list node - contains the devices with OPPs that
 *		have been registered. Nodes once added are not modified in this
 *		list.
 *		RCU usage: nodes are not modified in the list of device_opp,
 *		however addition is possible and is secured by dev_opp_list_lock
 * @srcu_head:	notifier head to notify the OPP availability changes.
 * @rcu_head:	RCU callback head used for deferred freeing
 * @dev_list:	list of devices that share these OPPs
 * @opp_list:	list of opps
 * @np:		struct device_node pointer for opp's DT node.
 * @clock_latency_ns_max: Max clock latency (in nanoseconds) of this device's
 *		OPPs.
 * @shared_opp: OPP is shared between multiple devices.
 *
 * This is an internal data structure maintaining the link to opps attached to
 * a device. This structure is not meant to be shared with users as it is
 * meant for bookkeeping and private to the OPP library.
 *
 * Because the opp structures can be used from both rcu and srcu readers, we
 * need to wait for the grace period of both of them before freeing any
 * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
 */
struct device_opp {
	struct list_head node;

	struct srcu_notifier_head srcu_head;
	struct rcu_head rcu_head;
	struct list_head dev_list;
	struct list_head opp_list;

	struct device_node *np;
	unsigned long clock_latency_ns_max;
	bool shared_opp;
};

/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opps it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);

#define opp_rcu_lockdep_assert()					\
do {									\
	rcu_lockdep_assert(rcu_read_lock_held() ||			\
				lockdep_is_held(&dev_opp_list_lock),	\
			   "Missing rcu_read_lock() or "		\
			   "dev_opp_list_lock protection");		\
} while (0)

static struct device_list_opp *_find_list_dev(const struct device *dev,
					      struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;

	list_for_each_entry(list_dev, &dev_opp->dev_list, node)
		if (list_dev->dev == dev)
			return list_dev;

	return NULL;
}

static struct device_opp *_managed_opp(const struct device_node *np)
{
	struct device_opp *dev_opp;

	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
		if (dev_opp->np == np) {
			/*
			 * Multiple devices can point to the same OPP table and
			 * so will have same node-pointer, np.
			 *
			 * But the OPPs will be considered as shared only if the
			 * OPP table contains a "opp-shared" property.
			 */
			return dev_opp->shared_opp ? dev_opp : NULL;
		}
	}

	return NULL;
}

/**
 * _find_device_opp() - find device_opp struct using device pointer
 * @dev:	device pointer used to lookup device OPPs
 *
 * Search list of device OPPs for one containing matching device. Does a RCU
 * reader operation to grab the pointer needed.
 *
 * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: This function must be called under rcu_read_lock(). device_opp
 * is a RCU protected pointer. This means that device_opp is valid as long
 * as we are under RCU lock.
 */
static struct device_opp *_find_device_opp(struct device *dev)
{
	struct device_opp *dev_opp;

	if (unlikely(IS_ERR_OR_NULL(dev))) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
		if (_find_list_dev(dev, dev_opp))
			return dev_opp;

	return ERR_PTR(-ENODEV);
}

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
 * @opp:	opp for which voltage has to be returned
 *
 * Return: voltage in microvolts corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long v = 0;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		v = tmp_opp->u_volt;

	return v;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);

/**
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which frequency has to be returned
 *
 * Return: frequency in hertz corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long f = 0;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
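
/*
 * Illustrative usage sketch (assumptions: a caller owning a 'struct device
 * *dev' and a target frequency 'target_freq'; not taken from an in-tree
 * driver). The opp pointer and the values read from it must be consumed while
 * still inside the RCU read-side critical section, using one of the
 * dev_pm_opp_find_freq_*() helpers defined below:
 *
 *	unsigned long freq = target_freq, volt;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *	if (IS_ERR(opp)) {
 *		rcu_read_unlock();
 *		return PTR_ERR(opp);
 *	}
 *	volt = dev_pm_opp_get_voltage(opp);
 *	freq = dev_pm_opp_get_freq(opp);
 *	rcu_read_unlock();
 *	(program the regulator to 'volt' and the clock to 'freq' after this)
 */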

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct device_opp *dev_opp;
	unsigned long clock_latency_ns;

	rcu_read_lock();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		clock_latency_ns = 0;
	else
		clock_latency_ns = dev_opp->clock_latency_ns_max;

	rcu_read_unlock();
	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
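
/*
 * Illustrative usage sketch (assumption: a hypothetical cpufreq-style driver
 * with a 'policy' object and a 'cpu_dev' device; not taken from an in-tree
 * driver). The maximum clock latency reported here can be used to seed a
 * driver's transition latency:
 *
 *	unsigned long latency_ns = dev_pm_opp_get_max_clock_latency(cpu_dev);
 *
 *	if (!latency_ns)
 *		latency_ns = CPUFREQ_ETERNAL;
 *	policy->cpuinfo.transition_latency = latency_ns;
 */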

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * else returns 0 if none, or the corresponding error value.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp;
	int count = 0;

	rcu_read_lock();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		count = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n",
			__func__, count);
		goto out_unlock;
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available)
			count++;
	}

out_unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
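
/*
 * Illustrative usage sketch (assumption: a hypothetical driver that sizes a
 * frequency table from the OPP count; 'table' is a made-up local array; not
 * taken from an in-tree driver):
 *
 *	int num_opps = dev_pm_opp_get_opp_count(dev);
 *
 *	if (num_opps <= 0)
 *		return num_opps ? num_opps : -ENODEV;
 *	table = kcalloc(num_opps + 1, sizeof(*table), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 */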

/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev:		device for which we do this operation
 * @freq:		frequency to search for
 * @available:		true/false - match for available opp
 *
 * Return: Searches for exact match in the opp list and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available=true, then the
 * match is for an exact matching frequency which is available in the stored
 * OPP table. If false, the match is for an exact frequency which is not
 * available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available == available &&
		    temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
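
/*
 * Illustrative usage sketch (assumption: a caller that wants to inspect one
 * specific, currently available frequency 'freq_hz'; not taken from an
 * in-tree driver):
 *
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
 *	if (!IS_ERR(opp))
 *		dev_dbg(dev, "OPP %lu Hz runs at %lu uV\n", freq_hz,
 *			dev_pm_opp_get_voltage(opp));
 *	rcu_read_unlock();
 */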

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
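
/*
 * Illustrative usage sketch (assumption: a caller that wants to walk every
 * available OPP in ascending frequency order; not taken from an in-tree
 * driver). dev_pm_opp_find_freq_ceil() can act as an iterator by bumping the
 * start frequency past the last match:
 *
 *	unsigned long freq = 0;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
 *		pr_info("OPP: %lu Hz, %lu uV\n", freq,
 *			dev_pm_opp_get_voltage(opp));
 *		freq++;
 *	}
 *	rcu_read_unlock();
 */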

/* List-dev Helpers */
static void _kfree_list_dev_rcu(struct rcu_head *head)
{
	struct device_list_opp *list_dev;

	list_dev = container_of(head, struct device_list_opp, rcu_head);
	kfree_rcu(list_dev, rcu_head);
}

static void _remove_list_dev(struct device_list_opp *list_dev,
			     struct device_opp *dev_opp)
{
	list_del(&list_dev->node);
	call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
		  _kfree_list_dev_rcu);
}

static struct device_list_opp *_add_list_dev(const struct device *dev,
					     struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;

	list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
	if (!list_dev)
		return NULL;

	/* Initialize list-dev */
	list_dev->dev = dev;
	list_add_rcu(&list_dev->node, &dev_opp->dev_list);

	return list_dev;
}

/**
 * _add_device_opp() - Find device OPP table or allocate a new one
 * @dev:	device for which we do this operation
 *
 * It tries to find an existing table first; if it couldn't find one, it
 * allocates a new OPP table and returns that.
 *
 * Return: valid device_opp pointer if success, else NULL.
 */
static struct device_opp *_add_device_opp(struct device *dev)
{
	struct device_opp *dev_opp;
	struct device_list_opp *list_dev;

	/* Check for existing list for 'dev' first */
	dev_opp = _find_device_opp(dev);
	if (!IS_ERR(dev_opp))
		return dev_opp;

	/*
	 * Allocate a new device OPP table. In the infrequent case where a new
	 * device needs to be added, we pay this penalty.
	 */
	dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
	if (!dev_opp)
		return NULL;

	INIT_LIST_HEAD(&dev_opp->dev_list);

	list_dev = _add_list_dev(dev, dev_opp);
	if (!list_dev) {
		kfree(dev_opp);
		return NULL;
	}

	srcu_init_notifier_head(&dev_opp->srcu_head);
	INIT_LIST_HEAD(&dev_opp->opp_list);

	/* Secure the device list modification */
	list_add_rcu(&dev_opp->node, &dev_opp_list);
	return dev_opp;
}

/**
 * _kfree_device_rcu() - Free device_opp RCU handler
 * @head:	RCU head
 */
static void _kfree_device_rcu(struct rcu_head *head)
{
	struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);

	kfree_rcu(device_opp, rcu_head);
}

/**
 * _remove_device_opp() - Removes a device OPP table
 * @dev_opp:	device OPP table to be removed.
 *
 * Removes/frees device OPP table if it doesn't contain any OPPs.
 */
static void _remove_device_opp(struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;

	if (!list_empty(&dev_opp->opp_list))
		return;

	list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
				    node);

	_remove_list_dev(list_dev, dev_opp);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&dev_opp->dev_list));

	list_del_rcu(&dev_opp->node);
	call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
		  _kfree_device_rcu);
}

/**
 * _kfree_opp_rcu() - Free OPP RCU handler
 * @head:	RCU head
 */
static void _kfree_opp_rcu(struct rcu_head *head)
{
	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);

	kfree_rcu(opp, rcu_head);
}

/**
 * _opp_remove() - Remove an OPP from a table definition
 * @dev_opp:	points back to the device_opp struct this opp belongs to
 * @opp:	pointer to the OPP to remove
 * @notify:	OPP_EVENT_REMOVE notification should be sent or not
 *
 * This function removes an opp definition from the opp list.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * It is assumed that the caller holds required mutex for an RCU updater
 * strategy.
 */
static void _opp_remove(struct device_opp *dev_opp,
			struct dev_pm_opp *opp, bool notify)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	if (notify)
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
	list_del_rcu(&opp->node);
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	_remove_device_opp(dev_opp);
}

/**
 * dev_pm_opp_remove() - Remove an OPP from OPP list
 * @dev:	device for which we do this operation
 * @freq:	OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp list.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct device_opp *dev_opp;
	bool found = false;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		goto unlock;

	list_for_each_entry(opp, &dev_opp->opp_list, node) {
		if (opp->rate == freq) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
		goto unlock;
	}

	_opp_remove(dev_opp, opp, true);
unlock:
	mutex_unlock(&dev_opp_list_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);

static struct dev_pm_opp *_allocate_opp(struct device *dev,
					struct device_opp **dev_opp)
{
	struct dev_pm_opp *opp;

	/* allocate new OPP node */
	opp = kzalloc(sizeof(*opp), GFP_KERNEL);
	if (!opp)
		return NULL;

	INIT_LIST_HEAD(&opp->node);

	*dev_opp = _add_device_opp(dev);
	if (!*dev_opp) {
		kfree(opp);
		return NULL;
	}

	return opp;
}

static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
		    struct device_opp *dev_opp)
{
	struct dev_pm_opp *opp;
	struct list_head *head = &dev_opp->opp_list;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &dev_opp->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an
	 * infinite loop.
	 */
	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->u_volt, opp->available,
			 new_opp->rate, new_opp->u_volt, new_opp->available);

		return opp->available && new_opp->u_volt == opp->u_volt ?
			0 : -EEXIST;
	}

	new_opp->dev_opp = dev_opp;
	list_add_rcu(&new_opp->node, head);

	return 0;
}

/**
 * _opp_add_dynamic() - Allocate a dynamic OPP.
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 * @dynamic:	Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and
 * freed by of_free_opp_table.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
static int _opp_add_dynamic(struct device *dev, unsigned long freq,
			    long u_volt, bool dynamic)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp;
	int ret;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	new_opp = _allocate_opp(dev, &dev_opp);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* populate the opp table */
	new_opp->rate = freq;
	new_opp->u_volt = u_volt;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, dev_opp);
	if (ret)
		goto free_opp;

	mutex_unlock(&dev_opp_list_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_remove(dev_opp, new_opp, false);
unlock:
	mutex_unlock(&dev_opp_list_lock);
	return ret;
}

/* TODO: Support multiple regulators */
static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
{
	u32 microvolt[3] = {0};
	int count, ret;

	count = of_property_count_u32_elems(opp->np, "opp-microvolt");
	if (!count)
		return 0;

	/* There can be one or three elements here */
	if (count != 1 && count != 3) {
		dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
			__func__, count);
		return -EINVAL;
	}

	ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt,
					 count);
	if (ret) {
		dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__,
			ret);
		return -EINVAL;
	}

	opp->u_volt = microvolt[0];
	opp->u_volt_min = microvolt[1];
	opp->u_volt_max = microvolt[2];

	return 0;
}
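
/*
 * Illustrative DT fragment (sketch only, node name and values are made up)
 * showing the properties consumed by this parser and by _opp_add_static_v2()
 * below: "opp-microvolt" may be a single target voltage or a
 * <target min max> triplet that also fills u_volt_min/u_volt_max.
 *
 *	opp@1000000000 {
 *		opp-hz = /bits/ 64 <1000000000>;
 *		opp-microvolt = <975000 970000 985000>;
 *		clock-latency-ns = <300000>;
 *	};
 */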

/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @dev:	device for which we do this operation
 * @np:		device node
 *
 * This function adds an opp definition to the opp list and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -EINVAL	Failed parsing the OPP node
 */
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp;
	u64 rate;
	int ret;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	new_opp = _allocate_opp(dev, &dev_opp);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (ret < 0) {
		dev_err(dev, "%s: opp-hz not found\n", __func__);
		goto free_opp;
	}

	/*
	 * Rate is defined as an unsigned long in clk API, and so casting
	 * explicitly to its type. Must be fixed once rate is 64 bit
	 * guaranteed in clk API.
	 */
	new_opp->rate = (unsigned long)rate;
	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	new_opp->np = np;
	new_opp->dynamic = false;
	new_opp->available = true;
	of_property_read_u32(np, "clock-latency-ns",
			     (u32 *)&new_opp->clock_latency_ns);

	ret = opp_get_microvolt(new_opp, dev);
	if (ret)
		goto free_opp;

	of_property_read_u32(np, "opp-microamp", (u32 *)&new_opp->u_amp);

	ret = _opp_add(dev, new_opp, dev_opp);
	if (ret)
		goto free_opp;

	if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
		dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;

	mutex_unlock(&dev_opp_list_lock);

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
		 new_opp->u_volt_min, new_opp->u_volt_max,
		 new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_remove(dev_opp, new_opp, false);
unlock:
	mutex_unlock(&dev_opp_list_lock);
	return ret;
}

/**
 * dev_pm_opp_add() - Add an OPP table from a table definition
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	return _opp_add_dynamic(dev, freq, u_volt, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
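
/*
 * Illustrative usage sketch (assumption: SoC/board code registering a few
 * OPPs at init time for a hypothetical 'mpu_dev'; frequencies and voltages
 * are made up; not taken from an in-tree driver):
 *
 *	dev_pm_opp_add(mpu_dev, 600000000, 1050000);
 *	dev_pm_opp_add(mpu_dev, 800000000, 1200000);
 *	dev_pm_opp_add(mpu_dev, 1000000000, 1325000);
 *
 * Each call may fail with -EEXIST or -ENOMEM, so real callers should check
 * the return value.
 */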

/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation; opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
					 new_opp);
	else
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
					 new_opp);

	return 0;

unlock:
	mutex_unlock(&dev_opp_list_lock);
	kfree(new_opp);
	return r;
}

/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it was temporarily made unavailable with
 * dev_pm_opp_disable.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);

/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
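
/*
 * Illustrative usage sketch (assumption: a hypothetical thermal or errata
 * handler that temporarily caps the highest OPP of 'dev' at 'max_freq_hz';
 * not taken from an in-tree driver):
 *
 *	if (too_hot)
 *		dev_pm_opp_disable(dev, max_freq_hz);
 *	else
 *		dev_pm_opp_enable(dev, max_freq_hz);
 *
 * Both calls may sleep, so they must not be made under rcu_read_lock() or
 * from atomic context.
 */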

/**
 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
 * @dev:	device pointer used to lookup device OPPs.
 *
 * Return: pointer to notifier head if found, otherwise -ENODEV or
 * -EINVAL based on type of error cast as a pointer. The value must be checked
 * with IS_ERR to determine valid pointer or error result.
 *
 * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
 * protected pointer. The reason for the same is that the dev_opp which backs
 * the returned notifier head is only guaranteed to remain valid while under
 * the locked area. The pointer returned must be used prior to unlocking with
 * rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
	struct device_opp *dev_opp = _find_device_opp(dev);

	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);	/* matching type */

	return &dev_opp->srcu_head;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
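
/*
 * Illustrative usage sketch (assumption: a hypothetical listener 'my_opp_nb'
 * interested in OPP_EVENT_ADD/REMOVE/ENABLE/DISABLE events; not taken from an
 * in-tree driver):
 *
 *	static int my_opp_notify(struct notifier_block *nb,
 *				 unsigned long event, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_opp_nb = {
 *		.notifier_call = my_opp_notify,
 *	};
 *
 *	struct srcu_notifier_head *nh;
 *
 *	rcu_read_lock();
 *	nh = dev_pm_opp_get_notifier(dev);
 *	rcu_read_unlock();
 *	if (!IS_ERR(nh))
 *		srcu_notifier_chain_register(nh, &my_opp_nb);
 */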

#ifdef CONFIG_OF
/**
 * of_free_opp_table() - Free OPP table entries created from static DT entries
 * @dev:	device pointer used to lookup device OPPs.
 *
 * Free OPPs created using static entries present in DT.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void of_free_opp_table(struct device *dev)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *opp, *tmp;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Check for existing list for 'dev' */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int error = PTR_ERR(dev_opp);

		if (error != -ENODEV)
			WARN(1, "%s: dev_opp: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
		goto unlock;
	}

	/* Find if dev_opp manages a single device */
	if (list_is_singular(&dev_opp->dev_list)) {
		/* Free static OPPs */
		list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
			if (!opp->dynamic)
				_opp_remove(dev_opp, opp, true);
		}
	} else {
		_remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
	}

unlock:
	mutex_unlock(&dev_opp_list_lock);
}
EXPORT_SYMBOL_GPL(of_free_opp_table);

/* Returns opp descriptor node from its phandle. Caller must do of_node_put() */
static struct device_node *
_of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop)
{
	struct device_node *opp_np;

	opp_np = of_find_node_by_phandle(be32_to_cpup(prop->value));
	if (!opp_np) {
		dev_err(dev, "%s: Prop: %s contains invalid opp desc phandle\n",
			__func__, prop->name);
		return ERR_PTR(-EINVAL);
	}

	return opp_np;
}

/* Initializes OPP tables based on new bindings */
static int _of_init_opp_table_v2(struct device *dev,
				 const struct property *prop)
{
	struct device_node *opp_np, *np;
	struct device_opp *dev_opp;
	int ret = 0, count = 0;

	if (!prop->value)
		return -ENODATA;

	/* Get opp node */
	opp_np = _of_get_opp_desc_node_from_prop(dev, prop);
	if (IS_ERR(opp_np))
		return PTR_ERR(opp_np);

	dev_opp = _managed_opp(opp_np);
	if (dev_opp) {
		/* OPPs are already managed */
		if (!_add_list_dev(dev, dev_opp))
			ret = -ENOMEM;
		goto put_opp_np;
	}

	/* We have the opp descriptor node now, iterate over it and add OPPs */
	for_each_available_child_of_node(opp_np, np) {
		count++;

		ret = _opp_add_static_v2(dev, np);
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
				ret);
			break;
		}
	}

	/* There should be one or more OPPs defined */
	if (WARN_ON(!count))
		goto put_opp_np;

	if (!ret) {
		if (!dev_opp) {
			dev_opp = _find_device_opp(dev);
			if (WARN_ON(!dev_opp))
				goto put_opp_np;
		}

		dev_opp->np = opp_np;
		dev_opp->shared_opp = of_property_read_bool(opp_np,
							    "opp-shared");
	} else {
		of_free_opp_table(dev);
	}

put_opp_np:
	of_node_put(opp_np);

	return ret;
}

/* Initializes OPP tables based on old-deprecated bindings */
static int _of_init_opp_table_v1(struct device *dev)
{
	const struct property *prop;
	const __be32 *val;
	int nr;

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop)
		return -ENODEV;
	if (!prop->value)
		return -ENODATA;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz volt-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP list\n", __func__);
		return -EINVAL;
	}

	val = prop->value;
	while (nr) {
		unsigned long freq = be32_to_cpup(val++) * 1000;
		unsigned long volt = be32_to_cpup(val++);

		if (_opp_add_dynamic(dev, freq, volt, false))
			dev_warn(dev, "%s: Failed to add OPP %ld\n",
				 __func__, freq);
		nr -= 2;
	}

	return 0;
}

/**
 * of_init_opp_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup device OPPs.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or is invalid data
 *		in device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int of_init_opp_table(struct device *dev)
{
	const struct property *prop;

	/*
	 * OPPs have two versions of bindings now. The older one is deprecated,
	 * so try the new binding first.
	 */
	prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
	if (!prop) {
		/*
		 * Try old-deprecated bindings for backward compatibility with
		 * older dtbs.
		 */
		return _of_init_opp_table_v1(dev);
	}

	return _of_init_opp_table_v2(dev, prop);
}
EXPORT_SYMBOL_GPL(of_init_opp_table);
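
/*
 * Illustrative usage sketch (assumption: a hypothetical platform driver
 * probe/remove pair; not taken from an in-tree driver). Static OPPs parsed
 * from DT are registered at probe time and released on the remove/error path:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret = of_init_opp_table(&pdev->dev);
 *
 *		if (ret)
 *			return ret;
 *		(on any later probe failure, call of_free_opp_table(&pdev->dev))
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		of_free_opp_table(&pdev->dev);
 *		return 0;
 *	}
 */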
#endif