drivers/base/power/opp/core.c
1 /*
2 * Generic OPP Interface
3 *
4 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
5 * Nishanth Menon
6 * Romit Dasgupta
7 * Kevin Hilman
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/clk.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/slab.h>
20 #include <linux/device.h>
21 #include <linux/of.h>
22 #include <linux/export.h>
23 #include <linux/regulator/consumer.h>
24
25 #include "opp.h"
26
27 /*
28 * The root of the list of all opp-tables. All opp_table structures branch off
29 * from here, with each opp_table containing the list of opps it supports in
30 * various states of availability.
31 */
32 static LIST_HEAD(opp_tables);
33 /* Lock to allow exclusive modification to the device and opp lists */
34 DEFINE_MUTEX(opp_table_lock);
35
36 #define opp_rcu_lockdep_assert() \
37 do { \
38 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
39 !lockdep_is_held(&opp_table_lock), \
40 "Missing rcu_read_lock() or " \
41 "opp_table_lock protection"); \
42 } while (0)
43
44 static struct opp_device *_find_opp_dev(const struct device *dev,
45 struct opp_table *opp_table)
46 {
47 struct opp_device *opp_dev;
48
49 list_for_each_entry(opp_dev, &opp_table->dev_list, node)
50 if (opp_dev->dev == dev)
51 return opp_dev;
52
53 return NULL;
54 }
55
56 static struct opp_table *_managed_opp(const struct device_node *np)
57 {
58 struct opp_table *opp_table;
59
60 list_for_each_entry_rcu(opp_table, &opp_tables, node) {
61 if (opp_table->np == np) {
62 /*
63 * Multiple devices can point to the same OPP table and
64 * so will have same node-pointer, np.
65 *
66 * But the OPPs will be considered as shared only if the
67 * OPP table contains a "opp-shared" property.
68 */
69 return opp_table->shared_opp ? opp_table : NULL;
70 }
71 }
72
73 return NULL;
74 }
75
76 /**
77 * _find_opp_table() - find opp_table struct using device pointer
78 * @dev: device pointer used to lookup OPP table
79 *
80 * Search the OPP table list for one containing the matching device. Does an
81 * RCU reader operation to grab the pointer needed.
82 *
83 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
84 * -EINVAL based on type of error.
85 *
86 * Locking: For readers, this function must be called under rcu_read_lock().
87 * opp_table is an RCU-protected pointer, which means that opp_table is valid
88 * as long as we are under RCU lock.
89 *
90 * For Writers, this function must be called with opp_table_lock held.
91 */
92 struct opp_table *_find_opp_table(struct device *dev)
93 {
94 struct opp_table *opp_table;
95
96 opp_rcu_lockdep_assert();
97
98 if (IS_ERR_OR_NULL(dev)) {
99 pr_err("%s: Invalid parameters\n", __func__);
100 return ERR_PTR(-EINVAL);
101 }
102
103 list_for_each_entry_rcu(opp_table, &opp_tables, node)
104 if (_find_opp_dev(dev, opp_table))
105 return opp_table;
106
107 return ERR_PTR(-ENODEV);
108 }
109
110 /**
111 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
112 * @opp: opp for which the voltage has to be returned
113 *
114 * Return: voltage in microvolts corresponding to the opp, else
115 * return 0
116 *
117 * Locking: This function must be called under rcu_read_lock(). opp is an RCU-
118 * protected pointer. This means that opp which could have been fetched by
119 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
120 * under RCU lock. The pointer returned by the opp_find_freq family must be
121 * used in the same section as the usage of this function with the pointer
122 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
123 * pointer.
124 */
125 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
126 {
127 struct dev_pm_opp *tmp_opp;
128 unsigned long v = 0;
129
130 opp_rcu_lockdep_assert();
131
132 tmp_opp = rcu_dereference(opp);
133 if (IS_ERR_OR_NULL(tmp_opp))
134 pr_err("%s: Invalid parameters\n", __func__);
135 else
136 v = tmp_opp->u_volt;
137
138 return v;
139 }
140 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
141
142 /**
143 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
144 * @opp: opp for which the frequency has to be returned
145 *
146 * Return: frequency in hertz corresponding to the opp, else
147 * return 0
148 *
149 * Locking: This function must be called under rcu_read_lock(). opp is an RCU-
150 * protected pointer. This means that opp which could have been fetched by
151 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
152 * under RCU lock. The pointer returned by the opp_find_freq family must be
153 * used in the same section as the usage of this function with the pointer
154 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
155 * pointer.
156 */
157 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
158 {
159 struct dev_pm_opp *tmp_opp;
160 unsigned long f = 0;
161
162 opp_rcu_lockdep_assert();
163
164 tmp_opp = rcu_dereference(opp);
165 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
166 pr_err("%s: Invalid parameters\n", __func__);
167 else
168 f = tmp_opp->rate;
169
170 return f;
171 }
172 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
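/*
 * Illustrative usage sketch, not part of this file's implementation: how a
 * caller might look up the OPP at or above a requested frequency and read its
 * voltage/frequency, following the locking rules documented above (the OPP
 * pointer is only dereferenced inside the RCU read-side critical section).
 * The function name and the caller-supplied 'dev' are assumptions made for
 * the example.
 */
#if 0	/* example only, not built */
static int example_query_opp(struct device *dev, unsigned long request_hz,
			     unsigned long *freq_hz, unsigned long *volt_uv)
{
	struct dev_pm_opp *opp;
	unsigned long freq = request_hz;

	rcu_read_lock();

	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}

	/* Both helpers must be called before rcu_read_unlock() */
	*freq_hz = dev_pm_opp_get_freq(opp);
	*volt_uv = dev_pm_opp_get_voltage(opp);

	rcu_read_unlock();

	return 0;
}
#endif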
173
174 /**
175 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
176 * @opp: opp for which turbo mode is being verified
177 *
178 * Turbo OPPs are not for normal use, and can be enabled (under certain
179 * conditions) for short durations of time to finish high-throughput work
180 * quickly. Running on them for longer times may overheat the chip.
181 *
182 * Return: true if opp is turbo opp, else false.
183 *
184 * Locking: This function must be called under rcu_read_lock(). opp is an RCU-
185 * protected pointer. This means that opp which could have been fetched by
186 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
187 * under RCU lock. The pointer returned by the opp_find_freq family must be
188 * used in the same section as the usage of this function with the pointer
189 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
190 * pointer.
191 */
192 bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
193 {
194 struct dev_pm_opp *tmp_opp;
195
196 opp_rcu_lockdep_assert();
197
198 tmp_opp = rcu_dereference(opp);
199 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
200 pr_err("%s: Invalid parameters\n", __func__);
201 return false;
202 }
203
204 return tmp_opp->turbo;
205 }
206 EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
207
208 /**
209 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
210 * @dev: device for which we do this operation
211 *
212 * Return: This function returns the max clock latency in nanoseconds.
213 *
214 * Locking: This function takes rcu_read_lock().
215 */
216 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
217 {
218 struct opp_table *opp_table;
219 unsigned long clock_latency_ns;
220
221 rcu_read_lock();
222
223 opp_table = _find_opp_table(dev);
224 if (IS_ERR(opp_table))
225 clock_latency_ns = 0;
226 else
227 clock_latency_ns = opp_table->clock_latency_ns_max;
228
229 rcu_read_unlock();
230 return clock_latency_ns;
231 }
232 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
233
234 /**
235 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
236 * @dev: device for which we do this operation
237 *
238 * Return: This function returns the max voltage latency in nanoseconds.
239 *
240 * Locking: This function takes rcu_read_lock().
241 */
242 unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
243 {
244 struct opp_table *opp_table;
245 struct dev_pm_opp *opp;
246 struct regulator *reg;
247 unsigned long latency_ns = 0;
248 unsigned long min_uV = ~0, max_uV = 0;
249 int ret;
250
251 rcu_read_lock();
252
253 opp_table = _find_opp_table(dev);
254 if (IS_ERR(opp_table)) {
255 rcu_read_unlock();
256 return 0;
257 }
258
259 reg = opp_table->regulator;
260 if (IS_ERR(reg)) {
261 /* Regulator may not be required for device */
262 if (reg)
263 dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
264 PTR_ERR(reg));
265 rcu_read_unlock();
266 return 0;
267 }
268
269 list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
270 if (!opp->available)
271 continue;
272
273 if (opp->u_volt_min < min_uV)
274 min_uV = opp->u_volt_min;
275 if (opp->u_volt_max > max_uV)
276 max_uV = opp->u_volt_max;
277 }
278
279 rcu_read_unlock();
280
281 /*
282 * The caller needs to ensure that opp_table (and hence the regulator)
283 * isn't freed, while we are executing this routine.
284 */
285 ret = regulator_set_voltage_time(reg, min_uV, max_uV);
286 if (ret > 0)
287 latency_ns = ret * 1000;
288
289 return latency_ns;
290 }
291 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
292
293 /**
294 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
295 * nanoseconds
296 * @dev: device for which we do this operation
297 *
298 * Return: This function returns the max transition latency, in nanoseconds, to
299 * switch from one OPP to another.
300 *
301 * Locking: This function takes rcu_read_lock().
302 */
303 unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
304 {
305 return dev_pm_opp_get_max_volt_latency(dev) +
306 dev_pm_opp_get_max_clock_latency(dev);
307 }
308 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
309
310 /**
311 * dev_pm_opp_get_suspend_opp() - Get suspend opp
312 * @dev: device for which we do this operation
313 *
314 * Return: This function returns pointer to the suspend opp if it is
315 * defined and available, otherwise it returns NULL.
316 *
317 * Locking: This function must be called under rcu_read_lock(). opp is an RCU-
318 * protected pointer. This is because the opp pointer which is returned will
319 * remain valid for use with opp_get_{voltage, freq} only while
320 * under the locked area. The pointer returned must be used prior to unlocking
321 * with rcu_read_unlock() to maintain the integrity of the pointer.
322 */
323 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
324 {
325 struct opp_table *opp_table;
326
327 opp_rcu_lockdep_assert();
328
329 opp_table = _find_opp_table(dev);
330 if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
331 !opp_table->suspend_opp->available)
332 return NULL;
333
334 return opp_table->suspend_opp;
335 }
336 EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
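/*
 * Illustrative sketch, not part of this file: a hypothetical helper that
 * reads the frequency of the suspend OPP (if one was marked with
 * "opp-suspend" in DT) under rcu_read_lock(), as required above. Returns 0
 * when no suspend OPP is defined.
 */
#if 0	/* example only, not built */
static unsigned long example_get_suspend_freq(struct device *dev)
{
	struct dev_pm_opp *opp;
	unsigned long freq = 0;

	rcu_read_lock();

	opp = dev_pm_opp_get_suspend_opp(dev);
	if (opp)
		freq = dev_pm_opp_get_freq(opp);

	rcu_read_unlock();

	return freq;
}
#endif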
337
338 /**
339 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
340 * @dev: device for which we do this operation
341 *
342 * Return: This function returns the number of available OPPs if there are any,
343 * 0 if there are none, or a corresponding error value otherwise.
344 *
345 * Locking: This function takes rcu_read_lock().
346 */
347 int dev_pm_opp_get_opp_count(struct device *dev)
348 {
349 struct opp_table *opp_table;
350 struct dev_pm_opp *temp_opp;
351 int count = 0;
352
353 rcu_read_lock();
354
355 opp_table = _find_opp_table(dev);
356 if (IS_ERR(opp_table)) {
357 count = PTR_ERR(opp_table);
358 dev_err(dev, "%s: OPP table not found (%d)\n",
359 __func__, count);
360 goto out_unlock;
361 }
362
363 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
364 if (temp_opp->available)
365 count++;
366 }
367
368 out_unlock:
369 rcu_read_unlock();
370 return count;
371 }
372 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
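/*
 * Illustrative sketch, not part of this file: sizing a frequency table from
 * the number of available OPPs and filling it by walking the OPPs with
 * dev_pm_opp_find_freq_ceil(). Error handling is deliberately minimal; the
 * helper name is an assumption made for the example.
 */
#if 0	/* example only, not built */
static int example_build_freq_table(struct device *dev, unsigned long **table)
{
	struct dev_pm_opp *opp;
	unsigned long freq = 0;
	int i, count;

	count = dev_pm_opp_get_opp_count(dev);
	if (count <= 0)
		return count ? count : -ENODEV;

	*table = kcalloc(count, sizeof(**table), GFP_KERNEL);
	if (!*table)
		return -ENOMEM;

	rcu_read_lock();

	/* Each iteration starts just above the previously found rate */
	for (i = 0; i < count; i++, freq++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;
		(*table)[i] = freq;
	}

	rcu_read_unlock();

	return 0;
}
#endif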
373
374 /**
375 * dev_pm_opp_find_freq_exact() - search for an exact frequency
376 * @dev: device for which we do this operation
377 * @freq: frequency to search for
378 * @available: true/false - match for available opp
379 *
380 * Return: Searches for exact match in the opp table and returns pointer to the
381 * matching opp if found, else returns ERR_PTR in case of error and should
382 * be handled using IS_ERR. Error return values can be:
383 * EINVAL: for bad pointer
384 * ERANGE: no match found for search
385 * ENODEV: if device not found in list of registered devices
386 *
387 * Note: available is a modifier for the search. If available=true, then the
388 * match is for an exact frequency which is available in the stored OPP
389 * table. If false, the match is for an exact frequency which is not available.
390 *
391 * This provides a mechanism to enable an opp which is not available currently
392 * or the opposite as well.
393 *
394 * Locking: This function must be called under rcu_read_lock(). opp is an RCU-
395 * protected pointer. This is because the opp pointer which is returned will
396 * remain valid for use with opp_get_{voltage, freq} only while
397 * under the locked area. The pointer returned must be used prior to unlocking
398 * with rcu_read_unlock() to maintain the integrity of the pointer.
399 */
400 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
401 unsigned long freq,
402 bool available)
403 {
404 struct opp_table *opp_table;
405 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
406
407 opp_rcu_lockdep_assert();
408
409 opp_table = _find_opp_table(dev);
410 if (IS_ERR(opp_table)) {
411 int r = PTR_ERR(opp_table);
412
413 dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
414 return ERR_PTR(r);
415 }
416
417 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
418 if (temp_opp->available == available &&
419 temp_opp->rate == freq) {
420 opp = temp_opp;
421 break;
422 }
423 }
424
425 return opp;
426 }
427 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
428
429 /**
430 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
431 * @dev: device for which we do this operation
432 * @freq: Start frequency
433 *
434 * Search for the matching ceil *available* OPP from a starting freq
435 * for a device.
436 *
437 * Return: matching *opp and refreshes *freq accordingly, else returns
438 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
439 * values can be:
440 * EINVAL: for bad pointer
441 * ERANGE: no match found for search
442 * ENODEV: if device not found in list of registered devices
443 *
444 * Locking: This function must be called under rcu_read_lock(). opp is an RCU-
445 * protected pointer. This is because the opp pointer which is returned will
446 * remain valid for use with opp_get_{voltage, freq} only while
447 * under the locked area. The pointer returned must be used prior to unlocking
448 * with rcu_read_unlock() to maintain the integrity of the pointer.
449 */
450 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
451 unsigned long *freq)
452 {
453 struct opp_table *opp_table;
454 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
455
456 opp_rcu_lockdep_assert();
457
458 if (!dev || !freq) {
459 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
460 return ERR_PTR(-EINVAL);
461 }
462
463 opp_table = _find_opp_table(dev);
464 if (IS_ERR(opp_table))
465 return ERR_CAST(opp_table);
466
467 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
468 if (temp_opp->available && temp_opp->rate >= *freq) {
469 opp = temp_opp;
470 *freq = opp->rate;
471 break;
472 }
473 }
474
475 return opp;
476 }
477 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
478
479 /**
480 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
481 * @dev: device for which we do this operation
482 * @freq: Start frequency
483 *
484 * Search for the matching floor *available* OPP from a starting freq
485 * for a device.
486 *
487 * Return: matching *opp and refreshes *freq accordingly, else returns
488 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
489 * values can be:
490 * EINVAL: for bad pointer
491 * ERANGE: no match found for search
492 * ENODEV: if device not found in list of registered devices
493 *
494 * Locking: This function must be called under rcu_read_lock(). opp is an RCU-
495 * protected pointer. This is because the opp pointer which is returned will
496 * remain valid for use with opp_get_{voltage, freq} only while
497 * under the locked area. The pointer returned must be used prior to unlocking
498 * with rcu_read_unlock() to maintain the integrity of the pointer.
499 */
500 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
501 unsigned long *freq)
502 {
503 struct opp_table *opp_table;
504 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
505
506 opp_rcu_lockdep_assert();
507
508 if (!dev || !freq) {
509 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
510 return ERR_PTR(-EINVAL);
511 }
512
513 opp_table = _find_opp_table(dev);
514 if (IS_ERR(opp_table))
515 return ERR_CAST(opp_table);
516
517 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
518 if (temp_opp->available) {
519 /* go to the next node, before choosing prev */
520 if (temp_opp->rate > *freq)
521 break;
522 else
523 opp = temp_opp;
524 }
525 }
526 if (!IS_ERR(opp))
527 *freq = opp->rate;
528
529 return opp;
530 }
531 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
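/*
 * Illustrative sketch, not part of this file: picking the highest available
 * OPP that does not exceed a frequency limit, and falling back to the lowest
 * OPP above the limit when nothing lies below it. Both lookups happen inside
 * one RCU read-side critical section, per the rules above. The helper name
 * is an assumption made for the example.
 */
#if 0	/* example only, not built */
static int example_pick_freq(struct device *dev, unsigned long limit_hz,
			     unsigned long *chosen_hz)
{
	struct dev_pm_opp *opp;
	unsigned long freq = limit_hz;
	int ret = 0;

	rcu_read_lock();

	opp = dev_pm_opp_find_freq_floor(dev, &freq);
	if (IS_ERR(opp)) {
		/* Nothing at or below the limit, take the next one above it */
		freq = limit_hz;
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	}

	if (IS_ERR(opp))
		ret = PTR_ERR(opp);
	else
		*chosen_hz = freq;	/* refreshed by the find helpers */

	rcu_read_unlock();

	return ret;
}
#endif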
532
533 /*
534 * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
535 * while clk returned here is used.
536 */
537 static struct clk *_get_opp_clk(struct device *dev)
538 {
539 struct opp_table *opp_table;
540 struct clk *clk;
541
542 rcu_read_lock();
543
544 opp_table = _find_opp_table(dev);
545 if (IS_ERR(opp_table)) {
546 dev_err(dev, "%s: device opp doesn't exist\n", __func__);
547 clk = ERR_CAST(opp_table);
548 goto unlock;
549 }
550
551 clk = opp_table->clk;
552 if (IS_ERR(clk))
553 dev_err(dev, "%s: No clock available for the device\n",
554 __func__);
555
556 unlock:
557 rcu_read_unlock();
558 return clk;
559 }
560
561 static int _set_opp_voltage(struct device *dev, struct regulator *reg,
562 unsigned long u_volt, unsigned long u_volt_min,
563 unsigned long u_volt_max)
564 {
565 int ret;
566
567 /* Regulator not available for device */
568 if (IS_ERR(reg)) {
569 dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
570 PTR_ERR(reg));
571 return 0;
572 }
573
574 dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
575 u_volt, u_volt_max);
576
577 ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
578 u_volt_max);
579 if (ret)
580 dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
581 __func__, u_volt_min, u_volt, u_volt_max, ret);
582
583 return ret;
584 }
585
586 /**
587 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
588 * @dev: device for which we do this operation
589 * @target_freq: frequency to achieve
590 *
591 * This configures the power-supplies and clock source to the levels specified
592 * by the OPP corresponding to the target_freq.
593 *
594 * Locking: This function takes rcu_read_lock().
595 */
596 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
597 {
598 struct opp_table *opp_table;
599 struct dev_pm_opp *old_opp, *opp;
600 struct regulator *reg;
601 struct clk *clk;
602 unsigned long freq, old_freq;
603 unsigned long u_volt, u_volt_min, u_volt_max;
604 unsigned long ou_volt, ou_volt_min, ou_volt_max;
605 int ret;
606
607 if (unlikely(!target_freq)) {
608 dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
609 target_freq);
610 return -EINVAL;
611 }
612
613 clk = _get_opp_clk(dev);
614 if (IS_ERR(clk))
615 return PTR_ERR(clk);
616
617 freq = clk_round_rate(clk, target_freq);
618 if ((long)freq <= 0)
619 freq = target_freq;
620
621 old_freq = clk_get_rate(clk);
622
623 /* Return early if nothing to do */
624 if (old_freq == freq) {
625 dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
626 __func__, freq);
627 return 0;
628 }
629
630 rcu_read_lock();
631
632 opp_table = _find_opp_table(dev);
633 if (IS_ERR(opp_table)) {
634 dev_err(dev, "%s: device opp doesn't exist\n", __func__);
635 rcu_read_unlock();
636 return PTR_ERR(opp_table);
637 }
638
639 old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
640 if (!IS_ERR(old_opp)) {
641 ou_volt = old_opp->u_volt;
642 ou_volt_min = old_opp->u_volt_min;
643 ou_volt_max = old_opp->u_volt_max;
644 } else {
645 dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
646 __func__, old_freq, PTR_ERR(old_opp));
647 }
648
649 opp = dev_pm_opp_find_freq_ceil(dev, &freq);
650 if (IS_ERR(opp)) {
651 ret = PTR_ERR(opp);
652 dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
653 __func__, freq, ret);
654 rcu_read_unlock();
655 return ret;
656 }
657
658 u_volt = opp->u_volt;
659 u_volt_min = opp->u_volt_min;
660 u_volt_max = opp->u_volt_max;
661
662 reg = opp_table->regulator;
663
664 rcu_read_unlock();
665
666 /* Scaling up? Scale voltage before frequency */
667 if (freq > old_freq) {
668 ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
669 u_volt_max);
670 if (ret)
671 goto restore_voltage;
672 }
673
674 /* Change frequency */
675
676 dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
677 __func__, old_freq, freq);
678
679 ret = clk_set_rate(clk, freq);
680 if (ret) {
681 dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
682 ret);
683 goto restore_voltage;
684 }
685
686 /* Scaling down? Scale voltage after frequency */
687 if (freq < old_freq) {
688 ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
689 u_volt_max);
690 if (ret)
691 goto restore_freq;
692 }
693
694 return 0;
695
696 restore_freq:
697 if (clk_set_rate(clk, old_freq))
698 dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
699 __func__, old_freq);
700 restore_voltage:
701 /* This shouldn't harm even if the voltages weren't updated earlier */
702 if (!IS_ERR(old_opp))
703 _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);
704
705 return ret;
706 }
707 EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
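/*
 * Illustrative sketch, not part of this file: a cpufreq/devfreq style
 * frequency-change path can simply hand the target over to
 * dev_pm_opp_set_rate(), which performs the regulator/clock ordering shown
 * above. 'dev' and 'target_hz' are assumed to come from the caller.
 */
#if 0	/* example only, not built */
static int example_target(struct device *dev, unsigned long target_hz)
{
	int ret;

	ret = dev_pm_opp_set_rate(dev, target_hz);
	if (ret)
		dev_err(dev, "failed to switch OPP to %lu Hz: %d\n",
			target_hz, ret);

	return ret;
}
#endif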
708
709 /* OPP-dev Helpers */
710 static void _kfree_opp_dev_rcu(struct rcu_head *head)
711 {
712 struct opp_device *opp_dev;
713
714 opp_dev = container_of(head, struct opp_device, rcu_head);
715 kfree_rcu(opp_dev, rcu_head);
716 }
717
718 static void _remove_opp_dev(struct opp_device *opp_dev,
719 struct opp_table *opp_table)
720 {
721 opp_debug_unregister(opp_dev, opp_table);
722 list_del(&opp_dev->node);
723 call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
724 _kfree_opp_dev_rcu);
725 }
726
727 struct opp_device *_add_opp_dev(const struct device *dev,
728 struct opp_table *opp_table)
729 {
730 struct opp_device *opp_dev;
731 int ret;
732
733 opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
734 if (!opp_dev)
735 return NULL;
736
737 /* Initialize opp-dev */
738 opp_dev->dev = dev;
739 list_add_rcu(&opp_dev->node, &opp_table->dev_list);
740
741 /* Create debugfs entries for the opp_table */
742 ret = opp_debug_register(opp_dev, opp_table);
743 if (ret)
744 dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
745 __func__, ret);
746
747 return opp_dev;
748 }
749
750 /**
751 * _add_opp_table() - Find OPP table or allocate a new one
752 * @dev: device for which we do this operation
753 *
754 * It tries to find an existing table first; if it cannot find one, it
755 * allocates a new OPP table and returns that.
756 *
757 * Return: valid opp_table pointer if success, else NULL.
758 */
759 static struct opp_table *_add_opp_table(struct device *dev)
760 {
761 struct opp_table *opp_table;
762 struct opp_device *opp_dev;
763 struct device_node *np;
764 int ret;
765
766 /* Check for existing table for 'dev' first */
767 opp_table = _find_opp_table(dev);
768 if (!IS_ERR(opp_table))
769 return opp_table;
770
771 /*
772 * Allocate a new OPP table. In the infrequent case where a new
773 * device needs to be added, we pay this penalty.
774 */
775 opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
776 if (!opp_table)
777 return NULL;
778
779 INIT_LIST_HEAD(&opp_table->dev_list);
780
781 opp_dev = _add_opp_dev(dev, opp_table);
782 if (!opp_dev) {
783 kfree(opp_table);
784 return NULL;
785 }
786
787 /*
788 * Only required for backward compatibility with v1 bindings, but isn't
789 * harmful for other cases, so we do it unconditionally.
790 */
791 np = of_node_get(dev->of_node);
792 if (np) {
793 u32 val;
794
795 if (!of_property_read_u32(np, "clock-latency", &val))
796 opp_table->clock_latency_ns_max = val;
797 of_property_read_u32(np, "voltage-tolerance",
798 &opp_table->voltage_tolerance_v1);
799 of_node_put(np);
800 }
801
802 /* Set regulator to a non-NULL error value */
803 opp_table->regulator = ERR_PTR(-ENXIO);
804
805 /* Find clk for the device */
806 opp_table->clk = clk_get(dev, NULL);
807 if (IS_ERR(opp_table->clk)) {
808 ret = PTR_ERR(opp_table->clk);
809 if (ret != -EPROBE_DEFER)
810 dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
811 ret);
812 }
813
814 srcu_init_notifier_head(&opp_table->srcu_head);
815 INIT_LIST_HEAD(&opp_table->opp_list);
816
817 /* Secure the device table modification */
818 list_add_rcu(&opp_table->node, &opp_tables);
819 return opp_table;
820 }
821
822 /**
823 * _kfree_device_rcu() - Free opp_table RCU handler
824 * @head: RCU head
825 */
826 static void _kfree_device_rcu(struct rcu_head *head)
827 {
828 struct opp_table *opp_table = container_of(head, struct opp_table,
829 rcu_head);
830
831 kfree_rcu(opp_table, rcu_head);
832 }
833
834 /**
835 * _remove_opp_table() - Removes an OPP table
836 * @opp_table: OPP table to be removed.
837 *
838 * Removes/frees OPP table if it doesn't contain any OPPs.
839 */
840 static void _remove_opp_table(struct opp_table *opp_table)
841 {
842 struct opp_device *opp_dev;
843
844 if (!list_empty(&opp_table->opp_list))
845 return;
846
847 if (opp_table->supported_hw)
848 return;
849
850 if (opp_table->prop_name)
851 return;
852
853 if (!IS_ERR(opp_table->regulator))
854 return;
855
856 /* Release clk */
857 if (!IS_ERR(opp_table->clk))
858 clk_put(opp_table->clk);
859
860 opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
861 node);
862
863 _remove_opp_dev(opp_dev, opp_table);
864
865 /* dev_list must be empty now */
866 WARN_ON(!list_empty(&opp_table->dev_list));
867
868 list_del_rcu(&opp_table->node);
869 call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
870 _kfree_device_rcu);
871 }
872
873 /**
874 * _kfree_opp_rcu() - Free OPP RCU handler
875 * @head: RCU head
876 */
877 static void _kfree_opp_rcu(struct rcu_head *head)
878 {
879 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
880
881 kfree_rcu(opp, rcu_head);
882 }
883
884 /**
885 * _opp_remove() - Remove an OPP from a table definition
886 * @opp_table: points back to the opp_table struct this opp belongs to
887 * @opp: pointer to the OPP to remove
888 * @notify: OPP_EVENT_REMOVE notification should be sent or not
889 *
890 * This function removes an opp definition from the opp table.
891 *
892 * Locking: The internal opp_table and opp structures are RCU protected.
893 * It is assumed that the caller holds required mutex for an RCU updater
894 * strategy.
895 */
896 static void _opp_remove(struct opp_table *opp_table,
897 struct dev_pm_opp *opp, bool notify)
898 {
899 /*
900 * Notify the changes in the availability of the operable
901 * frequency/voltage list.
902 */
903 if (notify)
904 srcu_notifier_call_chain(&opp_table->srcu_head,
905 OPP_EVENT_REMOVE, opp);
906 opp_debug_remove_one(opp);
907 list_del_rcu(&opp->node);
908 call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
909
910 _remove_opp_table(opp_table);
911 }
912
913 /**
914 * dev_pm_opp_remove() - Remove an OPP from OPP table
915 * @dev: device for which we do this operation
916 * @freq: OPP to remove with matching 'freq'
917 *
918 * This function removes an opp from the opp table.
919 *
920 * Locking: The internal opp_table and opp structures are RCU protected.
921 * Hence this function internally uses RCU updater strategy with mutex locks
922 * to keep the integrity of the internal data structures. Callers should ensure
923 * that this function is *NOT* called under RCU protection or in contexts where
924 * mutex cannot be locked.
925 */
926 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
927 {
928 struct dev_pm_opp *opp;
929 struct opp_table *opp_table;
930 bool found = false;
931
932 /* Hold our table modification lock here */
933 mutex_lock(&opp_table_lock);
934
935 opp_table = _find_opp_table(dev);
936 if (IS_ERR(opp_table))
937 goto unlock;
938
939 list_for_each_entry(opp, &opp_table->opp_list, node) {
940 if (opp->rate == freq) {
941 found = true;
942 break;
943 }
944 }
945
946 if (!found) {
947 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
948 __func__, freq);
949 goto unlock;
950 }
951
952 _opp_remove(opp_table, opp, true);
953 unlock:
954 mutex_unlock(&opp_table_lock);
955 }
956 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
957
958 static struct dev_pm_opp *_allocate_opp(struct device *dev,
959 struct opp_table **opp_table)
960 {
961 struct dev_pm_opp *opp;
962
963 /* allocate new OPP node */
964 opp = kzalloc(sizeof(*opp), GFP_KERNEL);
965 if (!opp)
966 return NULL;
967
968 INIT_LIST_HEAD(&opp->node);
969
970 *opp_table = _add_opp_table(dev);
971 if (!*opp_table) {
972 kfree(opp);
973 return NULL;
974 }
975
976 return opp;
977 }
978
979 static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
980 struct opp_table *opp_table)
981 {
982 struct regulator *reg = opp_table->regulator;
983
984 if (!IS_ERR(reg) &&
985 !regulator_is_supported_voltage(reg, opp->u_volt_min,
986 opp->u_volt_max)) {
987 pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
988 __func__, opp->u_volt_min, opp->u_volt_max);
989 return false;
990 }
991
992 return true;
993 }
994
995 static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
996 struct opp_table *opp_table)
997 {
998 struct dev_pm_opp *opp;
999 struct list_head *head = &opp_table->opp_list;
1000 int ret;
1001
1002 /*
1003 * Insert new OPP in order of increasing frequency and discard if
1004 * already present.
1005 *
1006 * Need to use &opp_table->opp_list in the condition part of the 'for'
1007 * loop, don't replace it with head otherwise it will become an infinite
1008 * loop.
1009 */
1010 list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
1011 if (new_opp->rate > opp->rate) {
1012 head = &opp->node;
1013 continue;
1014 }
1015
1016 if (new_opp->rate < opp->rate)
1017 break;
1018
1019 /* Duplicate OPPs */
1020 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
1021 __func__, opp->rate, opp->u_volt, opp->available,
1022 new_opp->rate, new_opp->u_volt, new_opp->available);
1023
1024 return opp->available && new_opp->u_volt == opp->u_volt ?
1025 0 : -EEXIST;
1026 }
1027
1028 new_opp->opp_table = opp_table;
1029 list_add_rcu(&new_opp->node, head);
1030
1031 ret = opp_debug_create_one(new_opp, opp_table);
1032 if (ret)
1033 dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
1034 __func__, ret);
1035
1036 if (!_opp_supported_by_regulators(new_opp, opp_table)) {
1037 new_opp->available = false;
1038 dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
1039 __func__, new_opp->rate);
1040 }
1041
1042 return 0;
1043 }
1044
1045 /**
1046 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
1047 * @dev: device for which we do this operation
1048 * @freq: Frequency in Hz for this OPP
1049 * @u_volt: Voltage in uVolts for this OPP
1050 * @dynamic: Dynamically added OPPs.
1051 *
1052 * This function adds an opp definition to the opp table and returns status.
1053 * The opp is made available by default and it can be controlled using
1054 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
1055 *
1056 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
1057 * and freed by dev_pm_opp_of_remove_table.
1058 *
1059 * Locking: The internal opp_table and opp structures are RCU protected.
1060 * Hence this function internally uses RCU updater strategy with mutex locks
1061 * to keep the integrity of the internal data structures. Callers should ensure
1062 * that this function is *NOT* called under RCU protection or in contexts where
1063 * mutex cannot be locked.
1064 *
1065 * Return:
1066 * 0 On success OR
1067 * Duplicate OPPs (both freq and volt are same) and opp->available
1068 * -EEXIST Freq are same and volt are different OR
1069 * Duplicate OPPs (both freq and volt are same) and !opp->available
1070 * -ENOMEM Memory allocation failure
1071 */
1072 static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
1073 bool dynamic)
1074 {
1075 struct opp_table *opp_table;
1076 struct dev_pm_opp *new_opp;
1077 unsigned long tol;
1078 int ret;
1079
1080 /* Hold our table modification lock here */
1081 mutex_lock(&opp_table_lock);
1082
1083 new_opp = _allocate_opp(dev, &opp_table);
1084 if (!new_opp) {
1085 ret = -ENOMEM;
1086 goto unlock;
1087 }
1088
1089 /* populate the opp table */
1090 new_opp->rate = freq;
1091 tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
1092 new_opp->u_volt = u_volt;
1093 new_opp->u_volt_min = u_volt - tol;
1094 new_opp->u_volt_max = u_volt + tol;
1095 new_opp->available = true;
1096 new_opp->dynamic = dynamic;
1097
1098 ret = _opp_add(dev, new_opp, opp_table);
1099 if (ret)
1100 goto free_opp;
1101
1102 mutex_unlock(&opp_table_lock);
1103
1104 /*
1105 * Notify the changes in the availability of the operable
1106 * frequency/voltage list.
1107 */
1108 srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
1109 return 0;
1110
1111 free_opp:
1112 _opp_remove(opp_table, new_opp, false);
1113 unlock:
1114 mutex_unlock(&opp_table_lock);
1115 return ret;
1116 }
1117
1118 /* TODO: Support multiple regulators */
1119 static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
1120 struct opp_table *opp_table)
1121 {
1122 u32 microvolt[3] = {0};
1123 u32 val;
1124 int count, ret;
1125 struct property *prop = NULL;
1126 char name[NAME_MAX];
1127
1128 /* Search for "opp-microvolt-<name>" */
1129 if (opp_table->prop_name) {
1130 snprintf(name, sizeof(name), "opp-microvolt-%s",
1131 opp_table->prop_name);
1132 prop = of_find_property(opp->np, name, NULL);
1133 }
1134
1135 if (!prop) {
1136 /* Search for "opp-microvolt" */
1137 sprintf(name, "opp-microvolt");
1138 prop = of_find_property(opp->np, name, NULL);
1139
1140 /* Missing property isn't a problem, but an invalid entry is */
1141 if (!prop)
1142 return 0;
1143 }
1144
1145 count = of_property_count_u32_elems(opp->np, name);
1146 if (count < 0) {
1147 dev_err(dev, "%s: Invalid %s property (%d)\n",
1148 __func__, name, count);
1149 return count;
1150 }
1151
1152 /* There can be one or three elements here */
1153 if (count != 1 && count != 3) {
1154 dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
1155 __func__, name, count);
1156 return -EINVAL;
1157 }
1158
1159 ret = of_property_read_u32_array(opp->np, name, microvolt, count);
1160 if (ret) {
1161 dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
1162 return -EINVAL;
1163 }
1164
1165 opp->u_volt = microvolt[0];
1166
1167 if (count == 1) {
1168 opp->u_volt_min = opp->u_volt;
1169 opp->u_volt_max = opp->u_volt;
1170 } else {
1171 opp->u_volt_min = microvolt[1];
1172 opp->u_volt_max = microvolt[2];
1173 }
1174
1175 /* Search for "opp-microamp-<name>" */
1176 prop = NULL;
1177 if (opp_table->prop_name) {
1178 snprintf(name, sizeof(name), "opp-microamp-%s",
1179 opp_table->prop_name);
1180 prop = of_find_property(opp->np, name, NULL);
1181 }
1182
1183 if (!prop) {
1184 /* Search for "opp-microamp" */
1185 sprintf(name, "opp-microamp");
1186 prop = of_find_property(opp->np, name, NULL);
1187 }
1188
1189 if (prop && !of_property_read_u32(opp->np, name, &val))
1190 opp->u_amp = val;
1191
1192 return 0;
1193 }
1194
1195 /**
1196 * dev_pm_opp_set_supported_hw() - Set supported platforms
1197 * @dev: Device for which supported-hw has to be set.
1198 * @versions: Array of hierarchy of versions to match.
1199 * @count: Number of elements in the array.
1200 *
1201 * This is required only for the V2 bindings, and it enables a platform to
1202 * specify the hierarchy of versions it supports. The OPP layer will then
1203 * enable the OPPs that are available for those versions, based on their
1204 * 'opp-supported-hw' property.
1205 *
1206 * Locking: The internal opp_table and opp structures are RCU protected.
1207 * Hence this function internally uses RCU updater strategy with mutex locks
1208 * to keep the integrity of the internal data structures. Callers should ensure
1209 * that this function is *NOT* called under RCU protection or in contexts where
1210 * mutex cannot be locked.
1211 */
1212 int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
1213 unsigned int count)
1214 {
1215 struct opp_table *opp_table;
1216 int ret = 0;
1217
1218 /* Hold our table modification lock here */
1219 mutex_lock(&opp_table_lock);
1220
1221 opp_table = _add_opp_table(dev);
1222 if (!opp_table) {
1223 ret = -ENOMEM;
1224 goto unlock;
1225 }
1226
1227 /* Make sure there are no concurrent readers while updating opp_table */
1228 WARN_ON(!list_empty(&opp_table->opp_list));
1229
1230 /* Do we already have a version hierarchy associated with opp_table? */
1231 if (opp_table->supported_hw) {
1232 dev_err(dev, "%s: Already have supported hardware list\n",
1233 __func__);
1234 ret = -EBUSY;
1235 goto err;
1236 }
1237
1238 opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
1239 GFP_KERNEL);
1240 if (!opp_table->supported_hw) {
1241 ret = -ENOMEM;
1242 goto err;
1243 }
1244
1245 opp_table->supported_hw_count = count;
1246 mutex_unlock(&opp_table_lock);
1247 return 0;
1248
1249 err:
1250 _remove_opp_table(opp_table);
1251 unlock:
1252 mutex_unlock(&opp_table_lock);
1253
1254 return ret;
1255 }
1256 EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
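/*
 * Illustrative sketch, not part of this file: platform code would call
 * dev_pm_opp_set_supported_hw() before the static OPPs are parsed, so that
 * only OPPs matching the hardware versions get enabled. The version values
 * below are made up; a real platform reads them from fuses or similar.
 */
#if 0	/* example only, not built */
static int example_init_opps_with_hw(struct device *dev)
{
	const u32 versions[] = { 0x2, 0x4 };	/* hypothetical speed bins */
	int ret;

	ret = dev_pm_opp_set_supported_hw(dev, versions, ARRAY_SIZE(versions));
	if (ret)
		return ret;

	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		dev_pm_opp_put_supported_hw(dev);

	return ret;
}
#endif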
1257
1258 /**
1259 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
1260 * @dev: Device for which supported-hw has to be put.
1261 *
1262 * This is required only for the V2 bindings, and is called for a matching
1263 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
1264 * will not be freed.
1265 *
1266 * Locking: The internal opp_table and opp structures are RCU protected.
1267 * Hence this function internally uses RCU updater strategy with mutex locks
1268 * to keep the integrity of the internal data structures. Callers should ensure
1269 * that this function is *NOT* called under RCU protection or in contexts where
1270 * mutex cannot be locked.
1271 */
1272 void dev_pm_opp_put_supported_hw(struct device *dev)
1273 {
1274 struct opp_table *opp_table;
1275
1276 /* Hold our table modification lock here */
1277 mutex_lock(&opp_table_lock);
1278
1279 /* Check for existing table for 'dev' first */
1280 opp_table = _find_opp_table(dev);
1281 if (IS_ERR(opp_table)) {
1282 dev_err(dev, "Failed to find opp_table: %ld\n",
1283 PTR_ERR(opp_table));
1284 goto unlock;
1285 }
1286
1287 /* Make sure there are no concurrent readers while updating opp_table */
1288 WARN_ON(!list_empty(&opp_table->opp_list));
1289
1290 if (!opp_table->supported_hw) {
1291 dev_err(dev, "%s: Doesn't have supported hardware list\n",
1292 __func__);
1293 goto unlock;
1294 }
1295
1296 kfree(opp_table->supported_hw);
1297 opp_table->supported_hw = NULL;
1298 opp_table->supported_hw_count = 0;
1299
1300 /* Try freeing opp_table if this was the last blocking resource */
1301 _remove_opp_table(opp_table);
1302
1303 unlock:
1304 mutex_unlock(&opp_table_lock);
1305 }
1306 EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
1307
1308 /**
1309 * dev_pm_opp_set_prop_name() - Set prop-extn name
1310 * @dev: Device for which the prop-name has to be set.
1311 * @name: name to postfix to properties.
1312 *
1313 * This is required only for the V2 bindings, and it enables a platform to
1314 * specify the extn to be used for certain property names. The properties to
1315 * which the extension will apply are opp-microvolt and opp-microamp. The OPP
1316 * core will then postfix the property name with -<name> while looking for them.
1317 *
1318 * Locking: The internal opp_table and opp structures are RCU protected.
1319 * Hence this function internally uses RCU updater strategy with mutex locks
1320 * to keep the integrity of the internal data structures. Callers should ensure
1321 * that this function is *NOT* called under RCU protection or in contexts where
1322 * mutex cannot be locked.
1323 */
1324 int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1325 {
1326 struct opp_table *opp_table;
1327 int ret = 0;
1328
1329 /* Hold our table modification lock here */
1330 mutex_lock(&opp_table_lock);
1331
1332 opp_table = _add_opp_table(dev);
1333 if (!opp_table) {
1334 ret = -ENOMEM;
1335 goto unlock;
1336 }
1337
1338 /* Make sure there are no concurrent readers while updating opp_table */
1339 WARN_ON(!list_empty(&opp_table->opp_list));
1340
1341 /* Do we already have a prop-name associated with opp_table? */
1342 if (opp_table->prop_name) {
1343 dev_err(dev, "%s: Already have prop-name %s\n", __func__,
1344 opp_table->prop_name);
1345 ret = -EBUSY;
1346 goto err;
1347 }
1348
1349 opp_table->prop_name = kstrdup(name, GFP_KERNEL);
1350 if (!opp_table->prop_name) {
1351 ret = -ENOMEM;
1352 goto err;
1353 }
1354
1355 mutex_unlock(&opp_table_lock);
1356 return 0;
1357
1358 err:
1359 _remove_opp_table(opp_table);
1360 unlock:
1361 mutex_unlock(&opp_table_lock);
1362
1363 return ret;
1364 }
1365 EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
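/*
 * Illustrative sketch, not part of this file: with the prop-name set to
 * "speed0" (a made-up name), opp_parse_supplies() above will look for
 * "opp-microvolt-speed0" / "opp-microamp-speed0" before falling back to the
 * plain property names.
 */
#if 0	/* example only, not built */
static int example_init_opps_with_prop_name(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_set_prop_name(dev, "speed0");
	if (ret)
		return ret;

	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		dev_pm_opp_put_prop_name(dev);

	return ret;
}
#endif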
1366
1367 /**
1368 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
1369 * @dev: Device for which the prop-name has to be put.
1370 *
1371 * This is required only for the V2 bindings, and is called for a matching
1372 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
1373 * will not be freed.
1374 *
1375 * Locking: The internal opp_table and opp structures are RCU protected.
1376 * Hence this function internally uses RCU updater strategy with mutex locks
1377 * to keep the integrity of the internal data structures. Callers should ensure
1378 * that this function is *NOT* called under RCU protection or in contexts where
1379 * mutex cannot be locked.
1380 */
1381 void dev_pm_opp_put_prop_name(struct device *dev)
1382 {
1383 struct opp_table *opp_table;
1384
1385 /* Hold our table modification lock here */
1386 mutex_lock(&opp_table_lock);
1387
1388 /* Check for existing table for 'dev' first */
1389 opp_table = _find_opp_table(dev);
1390 if (IS_ERR(opp_table)) {
1391 dev_err(dev, "Failed to find opp_table: %ld\n",
1392 PTR_ERR(opp_table));
1393 goto unlock;
1394 }
1395
1396 /* Make sure there are no concurrent readers while updating opp_table */
1397 WARN_ON(!list_empty(&opp_table->opp_list));
1398
1399 if (!opp_table->prop_name) {
1400 dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
1401 goto unlock;
1402 }
1403
1404 kfree(opp_table->prop_name);
1405 opp_table->prop_name = NULL;
1406
1407 /* Try freeing opp_table if this was the last blocking resource */
1408 _remove_opp_table(opp_table);
1409
1410 unlock:
1411 mutex_unlock(&opp_table_lock);
1412 }
1413 EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1414
1415 /**
1416 * dev_pm_opp_set_regulator() - Set regulator name for the device
1417 * @dev: Device for which regulator name is being set.
1418 * @name: Name of the regulator.
1419 *
1420 * In order to support OPP switching, the OPP layer needs to know the name of
1421 * the device's regulator, as the core is required to switch the voltage as well.
1422 *
1423 * This must be called before any OPPs are initialized for the device.
1424 *
1425 * Locking: The internal opp_table and opp structures are RCU protected.
1426 * Hence this function internally uses RCU updater strategy with mutex locks
1427 * to keep the integrity of the internal data structures. Callers should ensure
1428 * that this function is *NOT* called under RCU protection or in contexts where
1429 * mutex cannot be locked.
1430 */
1431 int dev_pm_opp_set_regulator(struct device *dev, const char *name)
1432 {
1433 struct opp_table *opp_table;
1434 struct regulator *reg;
1435 int ret;
1436
1437 mutex_lock(&opp_table_lock);
1438
1439 opp_table = _add_opp_table(dev);
1440 if (!opp_table) {
1441 ret = -ENOMEM;
1442 goto unlock;
1443 }
1444
1445 /* This should be called before OPPs are initialized */
1446 if (WARN_ON(!list_empty(&opp_table->opp_list))) {
1447 ret = -EBUSY;
1448 goto err;
1449 }
1450
1451 /* Already have a regulator set */
1452 if (WARN_ON(!IS_ERR(opp_table->regulator))) {
1453 ret = -EBUSY;
1454 goto err;
1455 }
1456 /* Allocate the regulator */
1457 reg = regulator_get_optional(dev, name);
1458 if (IS_ERR(reg)) {
1459 ret = PTR_ERR(reg);
1460 if (ret != -EPROBE_DEFER)
1461 dev_err(dev, "%s: no regulator (%s) found: %d\n",
1462 __func__, name, ret);
1463 goto err;
1464 }
1465
1466 opp_table->regulator = reg;
1467
1468 mutex_unlock(&opp_table_lock);
1469 return 0;
1470
1471 err:
1472 _remove_opp_table(opp_table);
1473 unlock:
1474 mutex_unlock(&opp_table_lock);
1475
1476 return ret;
1477 }
1478 EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
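/*
 * Illustrative sketch, not part of this file: registering the supply name
 * before any OPPs exist, so that dev_pm_opp_set_rate() can scale the voltage
 * along with the clock. "vdd" is a made-up supply name.
 */
#if 0	/* example only, not built */
static int example_init_opps_with_regulator(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_set_regulator(dev, "vdd");
	if (ret)
		return ret;

	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		dev_pm_opp_put_regulator(dev);

	return ret;
}
#endif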
1479
1480 /**
1481 * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
1482 * @dev: Device for which regulator was set.
1483 *
1484 * Locking: The internal opp_table and opp structures are RCU protected.
1485 * Hence this function internally uses RCU updater strategy with mutex locks
1486 * to keep the integrity of the internal data structures. Callers should ensure
1487 * that this function is *NOT* called under RCU protection or in contexts where
1488 * mutex cannot be locked.
1489 */
1490 void dev_pm_opp_put_regulator(struct device *dev)
1491 {
1492 struct opp_table *opp_table;
1493
1494 mutex_lock(&opp_table_lock);
1495
1496 /* Check for existing table for 'dev' first */
1497 opp_table = _find_opp_table(dev);
1498 if (IS_ERR(opp_table)) {
1499 dev_err(dev, "Failed to find opp_table: %ld\n",
1500 PTR_ERR(opp_table));
1501 goto unlock;
1502 }
1503
1504 if (IS_ERR(opp_table->regulator)) {
1505 dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
1506 goto unlock;
1507 }
1508
1509 /* Make sure there are no concurrent readers while updating opp_table */
1510 WARN_ON(!list_empty(&opp_table->opp_list));
1511
1512 regulator_put(opp_table->regulator);
1513 opp_table->regulator = ERR_PTR(-ENXIO);
1514
1515 /* Try freeing opp_table if this was the last blocking resource */
1516 _remove_opp_table(opp_table);
1517
1518 unlock:
1519 mutex_unlock(&opp_table_lock);
1520 }
1521 EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
1522
1523 static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
1524 struct device_node *np)
1525 {
1526 unsigned int count = opp_table->supported_hw_count;
1527 u32 version;
1528 int ret;
1529
1530 if (!opp_table->supported_hw)
1531 return true;
1532
1533 while (count--) {
1534 ret = of_property_read_u32_index(np, "opp-supported-hw", count,
1535 &version);
1536 if (ret) {
1537 dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
1538 __func__, count, ret);
1539 return false;
1540 }
1541
1542 /* Both of these are bitwise masks of the versions */
1543 if (!(version & opp_table->supported_hw[count]))
1544 return false;
1545 }
1546
1547 return true;
1548 }
1549
1550 /**
1551 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
1552 * @dev: device for which we do this operation
1553 * @np: device node
1554 *
1555 * This function adds an opp definition to the opp table and returns status. The
1556 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
1557 * removed by dev_pm_opp_remove.
1558 *
1559 * Locking: The internal opp_table and opp structures are RCU protected.
1560 * Hence this function internally uses RCU updater strategy with mutex locks
1561 * to keep the integrity of the internal data structures. Callers should ensure
1562 * that this function is *NOT* called under RCU protection or in contexts where
1563 * mutex cannot be locked.
1564 *
1565 * Return:
1566 * 0 On success OR
1567 * Duplicate OPPs (both freq and volt are same) and opp->available
1568 * -EEXIST Freq are same and volt are different OR
1569 * Duplicate OPPs (both freq and volt are same) and !opp->available
1570 * -ENOMEM Memory allocation failure
1571 * -EINVAL Failed parsing the OPP node
1572 */
1573 static int _opp_add_static_v2(struct device *dev, struct device_node *np)
1574 {
1575 struct opp_table *opp_table;
1576 struct dev_pm_opp *new_opp;
1577 u64 rate;
1578 u32 val;
1579 int ret;
1580
1581 /* Hold our table modification lock here */
1582 mutex_lock(&opp_table_lock);
1583
1584 new_opp = _allocate_opp(dev, &opp_table);
1585 if (!new_opp) {
1586 ret = -ENOMEM;
1587 goto unlock;
1588 }
1589
1590 ret = of_property_read_u64(np, "opp-hz", &rate);
1591 if (ret < 0) {
1592 dev_err(dev, "%s: opp-hz not found\n", __func__);
1593 goto free_opp;
1594 }
1595
1596 /* Check if the OPP supports hardware's hierarchy of versions or not */
1597 if (!_opp_is_supported(dev, opp_table, np)) {
1598 dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
1599 goto free_opp;
1600 }
1601
1602 /*
1603 * Rate is defined as an unsigned long in clk API, and so casting
1604 * explicitly to its type. Must be fixed once rate is 64 bit
1605 * guaranteed in clk API.
1606 */
1607 new_opp->rate = (unsigned long)rate;
1608 new_opp->turbo = of_property_read_bool(np, "turbo-mode");
1609
1610 new_opp->np = np;
1611 new_opp->dynamic = false;
1612 new_opp->available = true;
1613
1614 if (!of_property_read_u32(np, "clock-latency-ns", &val))
1615 new_opp->clock_latency_ns = val;
1616
1617 ret = opp_parse_supplies(new_opp, dev, opp_table);
1618 if (ret)
1619 goto free_opp;
1620
1621 ret = _opp_add(dev, new_opp, opp_table);
1622 if (ret)
1623 goto free_opp;
1624
1625 /* OPP to select on device suspend */
1626 if (of_property_read_bool(np, "opp-suspend")) {
1627 if (opp_table->suspend_opp) {
1628 dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
1629 __func__, opp_table->suspend_opp->rate,
1630 new_opp->rate);
1631 } else {
1632 new_opp->suspend = true;
1633 opp_table->suspend_opp = new_opp;
1634 }
1635 }
1636
1637 if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
1638 opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
1639
1640 mutex_unlock(&opp_table_lock);
1641
1642 pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
1643 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
1644 new_opp->u_volt_min, new_opp->u_volt_max,
1645 new_opp->clock_latency_ns);
1646
1647 /*
1648 * Notify the changes in the availability of the operable
1649 * frequency/voltage list.
1650 */
1651 srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
1652 return 0;
1653
1654 free_opp:
1655 _opp_remove(opp_table, new_opp, false);
1656 unlock:
1657 mutex_unlock(&opp_table_lock);
1658 return ret;
1659 }
1660
1661 /**
1662 * dev_pm_opp_add() - Add an OPP entry to the device's OPP table
1663 * @dev: device for which we do this operation
1664 * @freq: Frequency in Hz for this OPP
1665 * @u_volt: Voltage in uVolts for this OPP
1666 *
1667 * This function adds an opp definition to the opp table and returns status.
1668 * The opp is made available by default and it can be controlled using
1669 * dev_pm_opp_enable/disable functions.
1670 *
1671 * Locking: The internal opp_table and opp structures are RCU protected.
1672 * Hence this function internally uses RCU updater strategy with mutex locks
1673 * to keep the integrity of the internal data structures. Callers should ensure
1674 * that this function is *NOT* called under RCU protection or in contexts where
1675 * mutex cannot be locked.
1676 *
1677 * Return:
1678 * 0 On success OR
1679 * Duplicate OPPs (both freq and volt are same) and opp->available
1680 * -EEXIST Freq are same and volt are different OR
1681 * Duplicate OPPs (both freq and volt are same) and !opp->available
1682 * -ENOMEM Memory allocation failure
1683 */
1684 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
1685 {
1686 return _opp_add_v1(dev, freq, u_volt, true);
1687 }
1688 EXPORT_SYMBOL_GPL(dev_pm_opp_add);
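/*
 * Illustrative sketch, not part of this file: registering a couple of OPPs
 * directly from C, without DT. The frequency and voltage values are made up.
 */
#if 0	/* example only, not built */
static int example_register_opps(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_add(dev, 500000000, 975000);	/* 500 MHz @ 0.975 V */
	if (ret)
		return ret;

	return dev_pm_opp_add(dev, 1000000000, 1075000);	/* 1 GHz @ 1.075 V */
}
#endif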
1689
1690 /**
1691 * _opp_set_availability() - helper to set the availability of an opp
1692 * @dev: device for which we do this operation
1693 * @freq: OPP frequency to modify availability
1694 * @availability_req: availability status requested for this opp
1695 *
1696 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
1697 * share a common logic which is isolated here.
1698 *
1699 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1700 * copy operation, returns 0 if no modification was done OR modification was
1701 * successful.
1702 *
1703 * Locking: The internal opp_table and opp structures are RCU protected.
1704 * Hence this function internally uses RCU updater strategy with mutex locks to
1705 * keep the integrity of the internal data structures. Callers should ensure
1706 * that this function is *NOT* called under RCU protection or in contexts where
1707 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1708 */
1709 static int _opp_set_availability(struct device *dev, unsigned long freq,
1710 bool availability_req)
1711 {
1712 struct opp_table *opp_table;
1713 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
1714 int r = 0;
1715
1716 /* keep the node allocated */
1717 new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
1718 if (!new_opp)
1719 return -ENOMEM;
1720
1721 mutex_lock(&opp_table_lock);
1722
1723 /* Find the opp_table */
1724 opp_table = _find_opp_table(dev);
1725 if (IS_ERR(opp_table)) {
1726 r = PTR_ERR(opp_table);
1727 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
1728 goto unlock;
1729 }
1730
1731 /* Do we have the frequency? */
1732 list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
1733 if (tmp_opp->rate == freq) {
1734 opp = tmp_opp;
1735 break;
1736 }
1737 }
1738 if (IS_ERR(opp)) {
1739 r = PTR_ERR(opp);
1740 goto unlock;
1741 }
1742
1743 /* Is update really needed? */
1744 if (opp->available == availability_req)
1745 goto unlock;
1746 /* copy the old data over */
1747 *new_opp = *opp;
1748
1749 /* plug in new node */
1750 new_opp->available = availability_req;
1751
1752 list_replace_rcu(&opp->node, &new_opp->node);
1753 mutex_unlock(&opp_table_lock);
1754 call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
1755
1756 /* Notify the change of the OPP availability */
1757 if (availability_req)
1758 srcu_notifier_call_chain(&opp_table->srcu_head,
1759 OPP_EVENT_ENABLE, new_opp);
1760 else
1761 srcu_notifier_call_chain(&opp_table->srcu_head,
1762 OPP_EVENT_DISABLE, new_opp);
1763
1764 return 0;
1765
1766 unlock:
1767 mutex_unlock(&opp_table_lock);
1768 kfree(new_opp);
1769 return r;
1770 }
1771
1772 /**
1773 * dev_pm_opp_enable() - Enable a specific OPP
1774 * @dev: device for which we do this operation
1775 * @freq: OPP frequency to enable
1776 *
1777 * Enables a provided opp. If the operation is valid, this returns 0, else the
1778 * corresponding error value. It is meant to be used by users to make an OPP
1779 * available again after being temporarily made unavailable with dev_pm_opp_disable.
1780 *
1781 * Locking: The internal opp_table and opp structures are RCU protected.
1782 * Hence this function indirectly uses RCU and mutex locks to keep the
1783 * integrity of the internal data structures. Callers should ensure that
1784 * this function is *NOT* called under RCU protection or in contexts where
1785 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1786 *
1787 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1788 * copy operation, returns 0 if no modification was done OR modification was
1789 * successful.
1790 */
1791 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
1792 {
1793 return _opp_set_availability(dev, freq, true);
1794 }
1795 EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
1796
1797 /**
1798 * dev_pm_opp_disable() - Disable a specific OPP
1799 * @dev: device for which we do this operation
1800 * @freq: OPP frequency to disable
1801 *
1802 * Disables a provided opp. If the operation is valid, this returns
1803 * 0, else the corresponding error value. It is meant to be a temporary
1804 * control by users to make this OPP not available until the circumstances are
1805 * right to make it available again (with a call to dev_pm_opp_enable).
1806 *
1807 * Locking: The internal opp_table and opp structures are RCU protected.
1808 * Hence this function indirectly uses RCU and mutex locks to keep the
1809 * integrity of the internal data structures. Callers should ensure that
1810 * this function is *NOT* called under RCU protection or in contexts where
1811 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1812 *
1813 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1814 * copy operation, and 0 if no modification was needed or the modification was
1815 * successful.
1816 */
1817 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
1818 {
1819 return _opp_set_availability(dev, freq, false);
1820 }
1821 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
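
/*
 * Illustrative sketch (not part of the OPP core): a hypothetical thermal
 * handler that looks up the fastest currently-available OPP and disables it.
 * dev_pm_opp_find_freq_floor() must be called under rcu_read_lock(); the
 * matched rate is copied into 'freq' before the lock is dropped, so the
 * blocking dev_pm_opp_disable() call happens outside the RCU read-side
 * section.
 */
static int __maybe_unused example_thermal_throttle(struct device *dev)
{
	struct dev_pm_opp *opp;
	unsigned long freq = ULONG_MAX;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_floor(dev, &freq);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	rcu_read_unlock();

	/* freq now holds the highest available rate; make it unselectable */
	return dev_pm_opp_disable(dev, freq);
}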
1822
1823 /**
1824 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
1825 * @dev: device pointer used to lookup OPP table.
1826 *
1827 * Return: pointer to notifier head if found, otherwise -ENODEV or
1828 * -EINVAL based on the type of error, cast as a pointer. The value must be
1829 * checked with IS_ERR() to distinguish a valid pointer from an error result.
1830 *
1831 * Locking: This function must be called under rcu_read_lock(). opp_table is a
1832 * RCU protected pointer, so the notifier head which is returned remains
1833 * valid for use only while under the locked area. The pointer returned must
1834 * be used prior to unlocking with rcu_read_unlock() to maintain the
1835 * integrity of the pointer.
1836 */
1837 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
1838 {
1839 struct opp_table *opp_table = _find_opp_table(dev);
1840
1841 if (IS_ERR(opp_table))
1842 return ERR_CAST(opp_table); /* matching type */
1843
1844 return &opp_table->srcu_head;
1845 }
1846 EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
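
/*
 * Illustrative sketch (not part of the OPP core): registering a notifier on
 * the head returned by dev_pm_opp_get_notifier(), following a pattern
 * similar to existing users such as devfreq. The callback, notifier_block
 * and function names are hypothetical; 'data' carries the affected
 * dev_pm_opp for OPP_EVENT_ENABLE/OPP_EVENT_DISABLE events.
 */
static int example_opp_notifier_call(struct notifier_block *nb,
				     unsigned long event, void *data)
{
	switch (event) {
	case OPP_EVENT_ENABLE:
	case OPP_EVENT_DISABLE:
		/* An OPP changed availability; re-evaluate frequency limits */
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block example_opp_nb = {
	.notifier_call = example_opp_notifier_call,
};

static int __maybe_unused example_register_opp_notifier(struct device *dev)
{
	struct srcu_notifier_head *nh;
	int ret = 0;

	rcu_read_lock();
	nh = dev_pm_opp_get_notifier(dev);
	if (IS_ERR(nh))
		ret = PTR_ERR(nh);
	rcu_read_unlock();
	if (ret)
		return ret;

	/* Blocking call, so it must happen outside the RCU read-side section */
	return srcu_notifier_chain_register(nh, &example_opp_nb);
}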
1847
1848 #ifdef CONFIG_OF
1849 /**
1850 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
1851 * entries
1852 * @dev: device pointer used to lookup OPP table.
1853 *
1854 * Free OPPs created using static entries present in DT.
1855 *
1856 * Locking: The internal opp_table and opp structures are RCU protected.
1857 * Hence this function indirectly uses RCU updater strategy with mutex locks
1858 * to keep the integrity of the internal data structures. Callers should ensure
1859 * that this function is *NOT* called under RCU protection or in contexts where
1860 * mutex cannot be locked.
1861 */
1862 void dev_pm_opp_of_remove_table(struct device *dev)
1863 {
1864 struct opp_table *opp_table;
1865 struct dev_pm_opp *opp, *tmp;
1866
1867 /* Hold our table modification lock here */
1868 mutex_lock(&opp_table_lock);
1869
1870 /* Check for existing table for 'dev' */
1871 opp_table = _find_opp_table(dev);
1872 if (IS_ERR(opp_table)) {
1873 int error = PTR_ERR(opp_table);
1874
1875 if (error != -ENODEV)
1876 WARN(1, "%s: opp_table: %d\n",
1877 IS_ERR_OR_NULL(dev) ?
1878 "Invalid device" : dev_name(dev),
1879 error);
1880 goto unlock;
1881 }
1882
1883 /* Find if opp_table manages a single device */
1884 if (list_is_singular(&opp_table->dev_list)) {
1885 /* Free static OPPs */
1886 list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
1887 if (!opp->dynamic)
1888 _opp_remove(opp_table, opp, true);
1889 }
1890 } else {
1891 _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
1892 }
1893
1894 unlock:
1895 mutex_unlock(&opp_table_lock);
1896 }
1897 EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
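
/*
 * Illustrative sketch (not part of the OPP core): a hypothetical driver
 * remove path dropping the OPPs it added from DT at probe time. When several
 * devices share one table, each device only detaches itself until the last
 * user is gone, as the code above shows.
 */
static void __maybe_unused example_driver_remove(struct device *dev)
{
	/* Undo the dev_pm_opp_of_add_table() done at probe time */
	dev_pm_opp_of_remove_table(dev);
}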
1898
1899 /* Returns opp descriptor node for a device, caller must do of_node_put() */
1900 struct device_node *_of_get_opp_desc_node(struct device *dev)
1901 {
1902 /*
1903 * TODO: Support for multiple OPP tables.
1904 *
1905 * There should be only ONE phandle present in "operating-points-v2"
1906 * property.
1907 */
1908
1909 return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
1910 }
1911
1912 /* Initializes OPP tables based on new bindings */
1913 static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
1914 {
1915 struct device_node *np;
1916 struct opp_table *opp_table;
1917 int ret = 0, count = 0;
1918
1919 mutex_lock(&opp_table_lock);
1920
1921 opp_table = _managed_opp(opp_np);
1922 if (opp_table) {
1923 /* OPPs are already managed */
1924 if (!_add_opp_dev(dev, opp_table))
1925 ret = -ENOMEM;
1926 mutex_unlock(&opp_table_lock);
1927 return ret;
1928 }
1929 mutex_unlock(&opp_table_lock);
1930
1931 /* We have opp-table node now, iterate over it and add OPPs */
1932 for_each_available_child_of_node(opp_np, np) {
1933 count++;
1934
1935 ret = _opp_add_static_v2(dev, np);
1936 if (ret) {
1937 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
1938 ret);
of_node_put(np);
1939 goto free_table;
1940 }
1941 }
1942
1943 /* There should be one or more OPPs defined */
1944 if (WARN_ON(!count))
1945 return -ENOENT;
1946
1947 mutex_lock(&opp_table_lock);
1948
1949 opp_table = _find_opp_table(dev);
1950 if (WARN_ON(IS_ERR(opp_table))) {
1951 ret = PTR_ERR(opp_table);
1952 mutex_unlock(&opp_table_lock);
1953 goto free_table;
1954 }
1955
1956 opp_table->np = opp_np;
1957 opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
1958
1959 mutex_unlock(&opp_table_lock);
1960
1961 return 0;
1962
1963 free_table:
1964 dev_pm_opp_of_remove_table(dev);
1965
1966 return ret;
1967 }
1968
1969 /* Initializes OPP tables based on old-deprecated bindings */
1970 static int _of_add_opp_table_v1(struct device *dev)
1971 {
1972 const struct property *prop;
1973 const __be32 *val;
1974 int nr;
1975
1976 prop = of_find_property(dev->of_node, "operating-points", NULL);
1977 if (!prop)
1978 return -ENODEV;
1979 if (!prop->value)
1980 return -ENODATA;
1981
1982 /*
1983 * Each OPP is a tuple consisting of a frequency and a
1984 * voltage, like <freq-kHz volt-uV>.
1985 */
1986 nr = prop->length / sizeof(u32);
1987 if (nr % 2) {
1988 dev_err(dev, "%s: Invalid OPP table\n", __func__);
1989 return -EINVAL;
1990 }
1991
1992 val = prop->value;
1993 while (nr) {
1994 unsigned long freq = be32_to_cpup(val++) * 1000;
1995 unsigned long volt = be32_to_cpup(val++);
1996
1997 if (_opp_add_v1(dev, freq, volt, false))
1998 dev_warn(dev, "%s: Failed to add OPP %ld\n",
1999 __func__, freq);
2000 nr -= 2;
2001 }
2002
2003 return 0;
2004 }
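
/*
 * Illustrative sketch (not part of the OPP core): the <freq-kHz volt-uV>
 * pairs that the v1 parser above reads from an "operating-points" property,
 * e.g. <998400 1075000>, end up as a 998400000 Hz / 1075000 uV OPP. A
 * platform without such a DT property could register equivalent OPPs
 * directly through the public dev_pm_opp_add() helper; the values below are
 * made up.
 */
static int __maybe_unused example_register_legacy_opps(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_add(dev, 998400000, 1075000);	/* 998.4 MHz @ 1.075 V */
	if (ret)
		return ret;

	return dev_pm_opp_add(dev, 1497600000, 1150000); /* 1.4976 GHz @ 1.15 V */
}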
2005
2006 /**
2007 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
2008 * @dev: device pointer used to lookup OPP table.
2009 *
2010 * Register the initial OPP table with the OPP library for given device.
2011 *
2012 * Locking: The internal opp_table and opp structures are RCU protected.
2013 * Hence this function indirectly uses RCU updater strategy with mutex locks
2014 * to keep the integrity of the internal data structures. Callers should ensure
2015 * that this function is *NOT* called under RCU protection or in contexts where
2016 * mutex cannot be locked.
2017 *
2018 * Return:
2019 * 0		On success OR
2020 *		Duplicate OPPs (both freq and volt are same) and opp->available
2021 * -EEXIST	Freq is the same but volt differs OR
2022 *		Duplicate OPPs (both freq and volt are same) and !opp->available
2023 * -ENOMEM	Memory allocation failure
2024 * -ENODEV	when the 'operating-points' property is not found or contains
2025 *		invalid data in the device node.
2026 * -ENODATA	when an empty 'operating-points' property is found
2027 * -EINVAL	when invalid entries are found in the opp-v2 table
2028 */
2029 int dev_pm_opp_of_add_table(struct device *dev)
2030 {
2031 struct device_node *opp_np;
2032 int ret;
2033
2034 /*
2035 * OPPs have two versions of bindings now. The older one is deprecated,
2036 * try for the new binding first.
2037 */
2038 opp_np = _of_get_opp_desc_node(dev);
2039 if (!opp_np) {
2040 /*
2041 * Try old-deprecated bindings for backward compatibility with
2042 * older dtbs.
2043 */
2044 return _of_add_opp_table_v1(dev);
2045 }
2046
2047 ret = _of_add_opp_table_v2(dev, opp_np);
2048 of_node_put(opp_np);
2049
2050 return ret;
2051 }
2052 EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
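
/*
 * Illustrative sketch (not part of the OPP core): a hypothetical probe path
 * populating the OPP table from DT and then looking up the lowest available
 * frequency. dev_pm_opp_find_freq_ceil() must run under rcu_read_lock(), and
 * the returned opp is only dereferenced inside the locked region.
 */
static int __maybe_unused example_probe_opps(struct device *dev)
{
	struct dev_pm_opp *opp;
	unsigned long freq = 0, volt;
	int ret;

	ret = dev_pm_opp_of_add_table(dev);
	if (ret) {
		dev_err(dev, "failed to parse OPP table: %d\n", ret);
		return ret;
	}

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		dev_pm_opp_of_remove_table(dev);
		return PTR_ERR(opp);
	}
	volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	dev_dbg(dev, "lowest OPP: %lu Hz at %lu uV\n", freq, volt);
	return 0;
}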
2053 #endif