PM / OPP: Remove useless check
[deliverable/linux.git] / drivers / base / power / opp / core.c
1 /*
2 * Generic OPP Interface
3 *
4 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
5 * Nishanth Menon
6 * Romit Dasgupta
7 * Kevin Hilman
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/clk.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/slab.h>
20 #include <linux/device.h>
21 #include <linux/of.h>
22 #include <linux/export.h>
23 #include <linux/regulator/consumer.h>
24
25 #include "opp.h"
26
27 /*
28 * The root of the list of all opp-tables. All opp_table structures branch off
29 * from here, with each opp_table containing the list of opps it supports in
30 * various states of availability.
31 */
32 static LIST_HEAD(opp_tables);
33 /* Lock to allow exclusive modification to the device and opp lists */
34 DEFINE_MUTEX(opp_table_lock);
35
/*
 * Complain via lockdep when a caller holds neither rcu_read_lock() nor
 * opp_table_lock: readers of the OPP lists must be inside an RCU read-side
 * critical section, and updaters must hold opp_table_lock.
 */
#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
				!lockdep_is_held(&opp_table_lock),	\
			   "Missing rcu_read_lock() or "		\
			   "opp_table_lock protection");		\
} while (0)
43
44 static struct opp_device *_find_opp_dev(const struct device *dev,
45 struct opp_table *opp_table)
46 {
47 struct opp_device *opp_dev;
48
49 list_for_each_entry(opp_dev, &opp_table->dev_list, node)
50 if (opp_dev->dev == dev)
51 return opp_dev;
52
53 return NULL;
54 }
55
56 static struct opp_table *_managed_opp(const struct device_node *np)
57 {
58 struct opp_table *opp_table;
59
60 list_for_each_entry_rcu(opp_table, &opp_tables, node) {
61 if (opp_table->np == np) {
62 /*
63 * Multiple devices can point to the same OPP table and
64 * so will have same node-pointer, np.
65 *
66 * But the OPPs will be considered as shared only if the
67 * OPP table contains a "opp-shared" property.
68 */
69 return opp_table->shared_opp ? opp_table : NULL;
70 }
71 }
72
73 return NULL;
74 }
75
76 /**
77 * _find_opp_table() - find opp_table struct using device pointer
78 * @dev: device pointer used to lookup OPP table
79 *
80 * Search OPP table for one containing matching device. Does a RCU reader
81 * operation to grab the pointer needed.
82 *
83 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
84 * -EINVAL based on type of error.
85 *
86 * Locking: For readers, this function must be called under rcu_read_lock().
87 * opp_table is a RCU protected pointer, which means that opp_table is valid
88 * as long as we are under RCU lock.
89 *
90 * For Writers, this function must be called with opp_table_lock held.
91 */
92 struct opp_table *_find_opp_table(struct device *dev)
93 {
94 struct opp_table *opp_table;
95
96 opp_rcu_lockdep_assert();
97
98 if (IS_ERR_OR_NULL(dev)) {
99 pr_err("%s: Invalid parameters\n", __func__);
100 return ERR_PTR(-EINVAL);
101 }
102
103 list_for_each_entry_rcu(opp_table, &opp_tables, node)
104 if (_find_opp_dev(dev, opp_table))
105 return opp_table;
106
107 return ERR_PTR(-ENODEV);
108 }
109
110 /**
111 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
112 * @opp: opp for which voltage has to be returned for
113 *
114 * Return: voltage in micro volt corresponding to the opp, else
115 * return 0
116 *
117 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
118 * protected pointer. This means that opp which could have been fetched by
119 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
120 * under RCU lock. The pointer returned by the opp_find_freq family must be
121 * used in the same section as the usage of this function with the pointer
122 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
123 * pointer.
124 */
125 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
126 {
127 struct dev_pm_opp *tmp_opp;
128 unsigned long v = 0;
129
130 opp_rcu_lockdep_assert();
131
132 tmp_opp = rcu_dereference(opp);
133 if (IS_ERR_OR_NULL(tmp_opp))
134 pr_err("%s: Invalid parameters\n", __func__);
135 else
136 v = tmp_opp->u_volt;
137
138 return v;
139 }
140 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
141
142 /**
143 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
144 * @opp: opp for which frequency has to be returned for
145 *
146 * Return: frequency in hertz corresponding to the opp, else
147 * return 0
148 *
149 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
150 * protected pointer. This means that opp which could have been fetched by
151 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
152 * under RCU lock. The pointer returned by the opp_find_freq family must be
153 * used in the same section as the usage of this function with the pointer
154 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
155 * pointer.
156 */
157 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
158 {
159 struct dev_pm_opp *tmp_opp;
160 unsigned long f = 0;
161
162 opp_rcu_lockdep_assert();
163
164 tmp_opp = rcu_dereference(opp);
165 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
166 pr_err("%s: Invalid parameters\n", __func__);
167 else
168 f = tmp_opp->rate;
169
170 return f;
171 }
172 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
173
174 /**
175 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
176 * @opp: opp for which turbo mode is being verified
177 *
178 * Turbo OPPs are not for normal use, and can be enabled (under certain
179 * conditions) for short duration of times to finish high throughput work
180 * quickly. Running on them for longer times may overheat the chip.
181 *
182 * Return: true if opp is turbo opp, else false.
183 *
184 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
185 * protected pointer. This means that opp which could have been fetched by
186 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
187 * under RCU lock. The pointer returned by the opp_find_freq family must be
188 * used in the same section as the usage of this function with the pointer
189 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
190 * pointer.
191 */
192 bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
193 {
194 struct dev_pm_opp *tmp_opp;
195
196 opp_rcu_lockdep_assert();
197
198 tmp_opp = rcu_dereference(opp);
199 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
200 pr_err("%s: Invalid parameters\n", __func__);
201 return false;
202 }
203
204 return tmp_opp->turbo;
205 }
206 EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
207
208 /**
209 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
210 * @dev: device for which we do this operation
211 *
212 * Return: This function returns the max clock latency in nanoseconds.
213 *
214 * Locking: This function takes rcu_read_lock().
215 */
216 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
217 {
218 struct opp_table *opp_table;
219 unsigned long clock_latency_ns;
220
221 rcu_read_lock();
222
223 opp_table = _find_opp_table(dev);
224 if (IS_ERR(opp_table))
225 clock_latency_ns = 0;
226 else
227 clock_latency_ns = opp_table->clock_latency_ns_max;
228
229 rcu_read_unlock();
230 return clock_latency_ns;
231 }
232 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
233
/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Computes the regulator's worst-case transition time over the full voltage
 * span (lowest u_volt_min to highest u_volt_max) of all available OPPs.
 *
 * Return: This function returns the max voltage latency in nanoseconds,
 * or 0 when the device has no OPP table or no regulator.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	unsigned long min_uV = ~0, max_uV = 0;
	int ret;

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		rcu_read_unlock();
		return 0;
	}

	reg = opp_table->regulator;
	if (IS_ERR(reg)) {
		/* Regulator may not be required for device */
		rcu_read_unlock();
		return 0;
	}

	/* Find the widest voltage window across all available OPPs. */
	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
		if (!opp->available)
			continue;

		if (opp->u_volt_min < min_uV)
			min_uV = opp->u_volt_min;
		if (opp->u_volt_max > max_uV)
			max_uV = opp->u_volt_max;
	}

	rcu_read_unlock();

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	ret = regulator_set_voltage_time(reg, min_uV, max_uV);
	if (ret > 0)
		/* regulator_set_voltage_time() reports microseconds */
		latency_ns = ret * 1000;

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
289
/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to other.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	unsigned long volt_ns = dev_pm_opp_get_max_volt_latency(dev);
	unsigned long clk_ns = dev_pm_opp_get_max_clock_latency(dev);

	return volt_ns + clk_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
306
307 /**
308 * dev_pm_opp_get_suspend_opp() - Get suspend opp
309 * @dev: device for which we do this operation
310 *
311 * Return: This function returns pointer to the suspend opp if it is
312 * defined and available, otherwise it returns NULL.
313 *
314 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
315 * protected pointer. The reason for the same is that the opp pointer which is
316 * returned will remain valid for use with opp_get_{voltage, freq} only while
317 * under the locked area. The pointer returned must be used prior to unlocking
318 * with rcu_read_unlock() to maintain the integrity of the pointer.
319 */
320 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
321 {
322 struct opp_table *opp_table;
323
324 opp_rcu_lockdep_assert();
325
326 opp_table = _find_opp_table(dev);
327 if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
328 !opp_table->suspend_opp->available)
329 return NULL;
330
331 return opp_table->suspend_opp;
332 }
333 EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
334
335 /**
336 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
337 * @dev: device for which we do this operation
338 *
339 * Return: This function returns the number of available opps if there are any,
340 * else returns 0 if none or the corresponding error value.
341 *
342 * Locking: This function takes rcu_read_lock().
343 */
344 int dev_pm_opp_get_opp_count(struct device *dev)
345 {
346 struct opp_table *opp_table;
347 struct dev_pm_opp *temp_opp;
348 int count = 0;
349
350 rcu_read_lock();
351
352 opp_table = _find_opp_table(dev);
353 if (IS_ERR(opp_table)) {
354 count = PTR_ERR(opp_table);
355 dev_err(dev, "%s: OPP table not found (%d)\n",
356 __func__, count);
357 goto out_unlock;
358 }
359
360 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
361 if (temp_opp->available)
362 count++;
363 }
364
365 out_unlock:
366 rcu_read_unlock();
367 return count;
368 }
369 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
370
371 /**
372 * dev_pm_opp_find_freq_exact() - search for an exact frequency
373 * @dev: device for which we do this operation
374 * @freq: frequency to search for
375 * @available: true/false - match for available opp
376 *
377 * Return: Searches for exact match in the opp table and returns pointer to the
378 * matching opp if found, else returns ERR_PTR in case of error and should
379 * be handled using IS_ERR. Error return values can be:
380 * EINVAL: for bad pointer
381 * ERANGE: no match found for search
382 * ENODEV: if device not found in list of registered devices
383 *
384 * Note: available is a modifier for the search. if available=true, then the
385 * match is for exact matching frequency and is available in the stored OPP
386 * table. if false, the match is for exact frequency which is not available.
387 *
388 * This provides a mechanism to enable an opp which is not available currently
389 * or the opposite as well.
390 *
391 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
392 * protected pointer. The reason for the same is that the opp pointer which is
393 * returned will remain valid for use with opp_get_{voltage, freq} only while
394 * under the locked area. The pointer returned must be used prior to unlocking
395 * with rcu_read_unlock() to maintain the integrity of the pointer.
396 */
397 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
398 unsigned long freq,
399 bool available)
400 {
401 struct opp_table *opp_table;
402 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
403
404 opp_rcu_lockdep_assert();
405
406 opp_table = _find_opp_table(dev);
407 if (IS_ERR(opp_table)) {
408 int r = PTR_ERR(opp_table);
409
410 dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
411 return ERR_PTR(r);
412 }
413
414 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
415 if (temp_opp->available == available &&
416 temp_opp->rate == freq) {
417 opp = temp_opp;
418 break;
419 }
420 }
421
422 return opp;
423 }
424 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
425
426 /**
427 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
428 * @dev: device for which we do this operation
429 * @freq: Start frequency
430 *
431 * Search for the matching ceil *available* OPP from a starting freq
432 * for a device.
433 *
434 * Return: matching *opp and refreshes *freq accordingly, else returns
435 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
436 * values can be:
437 * EINVAL: for bad pointer
438 * ERANGE: no match found for search
439 * ENODEV: if device not found in list of registered devices
440 *
441 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
442 * protected pointer. The reason for the same is that the opp pointer which is
443 * returned will remain valid for use with opp_get_{voltage, freq} only while
444 * under the locked area. The pointer returned must be used prior to unlocking
445 * with rcu_read_unlock() to maintain the integrity of the pointer.
446 */
447 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
448 unsigned long *freq)
449 {
450 struct opp_table *opp_table;
451 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
452
453 opp_rcu_lockdep_assert();
454
455 if (!dev || !freq) {
456 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
457 return ERR_PTR(-EINVAL);
458 }
459
460 opp_table = _find_opp_table(dev);
461 if (IS_ERR(opp_table))
462 return ERR_CAST(opp_table);
463
464 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
465 if (temp_opp->available && temp_opp->rate >= *freq) {
466 opp = temp_opp;
467 *freq = opp->rate;
468 break;
469 }
470 }
471
472 return opp;
473 }
474 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
475
476 /**
477 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
478 * @dev: device for which we do this operation
479 * @freq: Start frequency
480 *
481 * Search for the matching floor *available* OPP from a starting freq
482 * for a device.
483 *
484 * Return: matching *opp and refreshes *freq accordingly, else returns
485 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
486 * values can be:
487 * EINVAL: for bad pointer
488 * ERANGE: no match found for search
489 * ENODEV: if device not found in list of registered devices
490 *
491 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
492 * protected pointer. The reason for the same is that the opp pointer which is
493 * returned will remain valid for use with opp_get_{voltage, freq} only while
494 * under the locked area. The pointer returned must be used prior to unlocking
495 * with rcu_read_unlock() to maintain the integrity of the pointer.
496 */
497 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
498 unsigned long *freq)
499 {
500 struct opp_table *opp_table;
501 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
502
503 opp_rcu_lockdep_assert();
504
505 if (!dev || !freq) {
506 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
507 return ERR_PTR(-EINVAL);
508 }
509
510 opp_table = _find_opp_table(dev);
511 if (IS_ERR(opp_table))
512 return ERR_CAST(opp_table);
513
514 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
515 if (temp_opp->available) {
516 /* go to the next node, before choosing prev */
517 if (temp_opp->rate > *freq)
518 break;
519 else
520 opp = temp_opp;
521 }
522 }
523 if (!IS_ERR(opp))
524 *freq = opp->rate;
525
526 return opp;
527 }
528 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
529
530 /*
531 * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
532 * while clk returned here is used.
533 */
534 static struct clk *_get_opp_clk(struct device *dev)
535 {
536 struct opp_table *opp_table;
537 struct clk *clk;
538
539 rcu_read_lock();
540
541 opp_table = _find_opp_table(dev);
542 if (IS_ERR(opp_table)) {
543 dev_err(dev, "%s: device opp doesn't exist\n", __func__);
544 clk = ERR_CAST(opp_table);
545 goto unlock;
546 }
547
548 clk = opp_table->clk;
549 if (IS_ERR(clk))
550 dev_err(dev, "%s: No clock available for the device\n",
551 __func__);
552
553 unlock:
554 rcu_read_unlock();
555 return clk;
556 }
557
/*
 * _set_opp_voltage() - program @reg to an OPP's voltage triplet
 * @dev: device being scaled (used for log messages only)
 * @reg: regulator to program; may be an ERR_PTR if the device has none
 * @u_volt: target voltage in microvolts
 * @u_volt_min: minimum acceptable voltage in microvolts
 * @u_volt_max: maximum acceptable voltage in microvolts
 *
 * Return: 0 on success (including the no-regulator case), negative errno
 * from regulator_set_voltage_triplet() otherwise.
 */
static int _set_opp_voltage(struct device *dev, struct regulator *reg,
			    unsigned long u_volt, unsigned long u_volt_min,
			    unsigned long u_volt_max)
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

	/* Values are microvolts (previous log text mislabelled them as mV). */
	dev_dbg(dev, "%s: voltages (uV): %lu %lu %lu\n", __func__, u_volt_min,
		u_volt, u_volt_max);

	ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
					    u_volt_max);
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
			__func__, u_volt_min, u_volt, u_volt_max, ret);

	return ret;
}
582
/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies and clock source to the levels specified
 * by the OPP corresponding to the target_freq. Voltage is raised before the
 * clock when scaling up, and lowered after the clock when scaling down; on
 * failure both are rolled back to the old OPP's settings where possible.
 *
 * Return: 0 on success, negative errno on failure.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *old_opp, *opp;
	struct regulator *reg;
	struct clk *clk;
	unsigned long freq, old_freq;
	unsigned long u_volt, u_volt_min, u_volt_max;
	unsigned long ou_volt, ou_volt_min, ou_volt_max;
	int ret;

	if (unlikely(!target_freq)) {
		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
			target_freq);
		return -EINVAL;
	}

	clk = _get_opp_clk(dev);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Let the clk framework pick the rate it would actually run at. */
	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		return 0;
	}

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		rcu_read_unlock();
		return PTR_ERR(opp_table);
	}

	/*
	 * Snapshot the old OPP's voltages for rollback. A lookup failure here
	 * is not fatal: we simply can't restore voltage on error later.
	 */
	old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
	if (!IS_ERR(old_opp)) {
		ou_volt = old_opp->u_volt;
		ou_volt_min = old_opp->u_volt_min;
		ou_volt_max = old_opp->u_volt_max;
	} else {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		rcu_read_unlock();
		return ret;
	}

	/* Copy everything we need out of the RCU-protected OPP. */
	u_volt = opp->u_volt;
	u_volt_min = opp->u_volt_min;
	u_volt_max = opp->u_volt_max;

	reg = opp_table->regulator;

	/*
	 * Unlock before the (possibly sleeping) regulator/clk calls; the
	 * caller must keep the opp_table (and thus reg/clk) alive meanwhile.
	 */
	rcu_read_unlock();

	/* Scaling up? Scale voltage before frequency */
	if (freq > old_freq) {
		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
				       u_volt_max);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */

	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
		__func__, old_freq, freq);

	ret = clk_set_rate(clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
		goto restore_voltage;
	}

	/* Scaling down? Scale voltage after frequency */
	if (freq < old_freq) {
		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
				       u_volt_max);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	if (clk_set_rate(clk, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	if (!IS_ERR(old_opp))
		_set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
705
/* OPP-dev Helpers */

/* SRCU callback that frees an opp_device once no readers remain. */
static void _kfree_opp_dev_rcu(struct rcu_head *head)
{
	struct opp_device *opp_dev;

	opp_dev = container_of(head, struct opp_device, rcu_head);
	/*
	 * NOTE(review): kfree_rcu() inside an RCU callback defers the free
	 * through yet another grace period; a plain kfree() would likely
	 * suffice here -- confirm before changing.
	 */
	kfree_rcu(opp_dev, rcu_head);
}
714
/*
 * Unlink @opp_dev from @opp_table and schedule its memory for release once
 * all SRCU readers of the table's notifier head have finished.
 */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	/* Defer the actual kfree until current readers are done. */
	call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
		  _kfree_opp_dev_rcu);
}
723
724 struct opp_device *_add_opp_dev(const struct device *dev,
725 struct opp_table *opp_table)
726 {
727 struct opp_device *opp_dev;
728 int ret;
729
730 opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
731 if (!opp_dev)
732 return NULL;
733
734 /* Initialize opp-dev */
735 opp_dev->dev = dev;
736 list_add_rcu(&opp_dev->node, &opp_table->dev_list);
737
738 /* Create debugfs entries for the opp_table */
739 ret = opp_debug_register(opp_dev, opp_table);
740 if (ret)
741 dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
742 __func__, ret);
743
744 return opp_dev;
745 }
746
/**
 * _add_opp_table() - Find OPP table or allocate a new one
 * @dev: device for which we do this operation
 *
 * It tries to find an existing table first, if it couldn't find one, it
 * allocates a new OPP table and returns that.
 *
 * Return: valid opp_table pointer if success, else NULL.
 */
static struct opp_table *_add_opp_table(struct device *dev)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	struct device_node *np;
	int ret;

	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (!IS_ERR(opp_table))
		return opp_table;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device is needed to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return NULL;

	INIT_LIST_HEAD(&opp_table->dev_list);

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		kfree(opp_table);
		return NULL;
	}

	/*
	 * Only required for backward compatibility with v1 bindings, but isn't
	 * harmful for other cases. And so we do it unconditionally.
	 */
	np = of_node_get(dev->of_node);
	if (np) {
		u32 val;

		if (!of_property_read_u32(np, "clock-latency", &val))
			opp_table->clock_latency_ns_max = val;
		of_property_read_u32(np, "voltage-tolerance",
				     &opp_table->voltage_tolerance_v1);
		of_node_put(np);
	}

	/* Set regulator to a non-NULL error value */
	opp_table->regulator = ERR_PTR(-ENXIO);

	/* Find clk for the device; a missing clock is tolerated here. */
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		if (ret != -EPROBE_DEFER)
			dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
	}

	srcu_init_notifier_head(&opp_table->srcu_head);
	INIT_LIST_HEAD(&opp_table->opp_list);

	/* Secure the device table modification */
	list_add_rcu(&opp_table->node, &opp_tables);
	return opp_table;
}
818
/**
 * _kfree_device_rcu() - Free opp_table RCU handler
 * @head: RCU head embedded in the opp_table being released
 */
static void _kfree_device_rcu(struct rcu_head *head)
{
	struct opp_table *opp_table = container_of(head, struct opp_table,
						   rcu_head);

	/*
	 * NOTE(review): kfree_rcu() inside an RCU callback defers the free
	 * through yet another grace period; a plain kfree() would likely
	 * suffice here -- confirm before changing.
	 */
	kfree_rcu(opp_table, rcu_head);
}
830
/**
 * _remove_opp_table() - Removes a OPP table
 * @opp_table: OPP table to be removed.
 *
 * Removes/frees OPP table if it doesn't contain any OPPs. The table is also
 * kept alive while any external resource (supported_hw, prop_name or a
 * regulator) is still attached to it.
 */
static void _remove_opp_table(struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	/* Still holds OPPs: keep the table. */
	if (!list_empty(&opp_table->opp_list))
		return;

	/* Platform code still owns a supported_hw setting: keep the table. */
	if (opp_table->supported_hw)
		return;

	/* Platform code still owns a prop_name setting: keep the table. */
	if (opp_table->prop_name)
		return;

	/* A regulator is still attached: keep the table. */
	if (!IS_ERR(opp_table->regulator))
		return;

	/* Release clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
				   node);

	_remove_opp_dev(opp_dev, opp_table);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&opp_table->dev_list));

	/* Unpublish, then free after all SRCU readers are done. */
	list_del_rcu(&opp_table->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
		  _kfree_device_rcu);
}
869
/**
 * _kfree_opp_rcu() - Free OPP RCU handler
 * @head: RCU head embedded in the OPP being released
 */
static void _kfree_opp_rcu(struct rcu_head *head)
{
	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);

	/*
	 * NOTE(review): kfree_rcu() inside an RCU callback defers the free
	 * through yet another grace period; a plain kfree() would likely
	 * suffice here -- confirm before changing.
	 */
	kfree_rcu(opp, rcu_head);
}
880
/**
 * _opp_remove() - Remove an OPP from a table definition
 * @opp_table: points back to the opp_table struct this opp belongs to
 * @opp: pointer to the OPP to remove
 * @notify: OPP_EVENT_REMOVE notification should be sent or not
 *
 * This function removes an opp definition from the opp table. The opp_table
 * itself is torn down afterwards if this was its last OPP and no other
 * resources pin it (see _remove_opp_table()).
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * It is assumed that the caller holds required mutex for an RCU updater
 * strategy.
 */
static void _opp_remove(struct opp_table *opp_table,
			struct dev_pm_opp *opp, bool notify)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	if (notify)
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_REMOVE, opp);
	opp_debug_remove_one(opp);
	list_del_rcu(&opp->node);
	/* Free the OPP only after all SRCU readers are done with it. */
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	_remove_opp_table(opp_table);
}
909
910 /**
911 * dev_pm_opp_remove() - Remove an OPP from OPP table
912 * @dev: device for which we do this operation
913 * @freq: OPP to remove with matching 'freq'
914 *
915 * This function removes an opp from the opp table.
916 *
917 * Locking: The internal opp_table and opp structures are RCU protected.
918 * Hence this function internally uses RCU updater strategy with mutex locks
919 * to keep the integrity of the internal data structures. Callers should ensure
920 * that this function is *NOT* called under RCU protection or in contexts where
921 * mutex cannot be locked.
922 */
923 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
924 {
925 struct dev_pm_opp *opp;
926 struct opp_table *opp_table;
927 bool found = false;
928
929 /* Hold our table modification lock here */
930 mutex_lock(&opp_table_lock);
931
932 opp_table = _find_opp_table(dev);
933 if (IS_ERR(opp_table))
934 goto unlock;
935
936 list_for_each_entry(opp, &opp_table->opp_list, node) {
937 if (opp->rate == freq) {
938 found = true;
939 break;
940 }
941 }
942
943 if (!found) {
944 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
945 __func__, freq);
946 goto unlock;
947 }
948
949 _opp_remove(opp_table, opp, true);
950 unlock:
951 mutex_unlock(&opp_table_lock);
952 }
953 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
954
955 static struct dev_pm_opp *_allocate_opp(struct device *dev,
956 struct opp_table **opp_table)
957 {
958 struct dev_pm_opp *opp;
959
960 /* allocate new OPP node */
961 opp = kzalloc(sizeof(*opp), GFP_KERNEL);
962 if (!opp)
963 return NULL;
964
965 INIT_LIST_HEAD(&opp->node);
966
967 *opp_table = _add_opp_table(dev);
968 if (!*opp_table) {
969 kfree(opp);
970 return NULL;
971 }
972
973 return opp;
974 }
975
976 static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
977 struct opp_table *opp_table)
978 {
979 struct regulator *reg = opp_table->regulator;
980
981 if (!IS_ERR(reg) &&
982 !regulator_is_supported_voltage(reg, opp->u_volt_min,
983 opp->u_volt_max)) {
984 pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
985 __func__, opp->u_volt_min, opp->u_volt_max);
986 return false;
987 }
988
989 return true;
990 }
991
/*
 * Link @new_opp into @opp_table's frequency-sorted opp_list, register its
 * debugfs entry and validate it against the table's regulator.
 *
 * Return: 0 on success (including a benign duplicate: same freq, same volt,
 * existing OPP available), -EEXIST for a conflicting duplicate.
 */
static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
		    struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	struct list_head *head = &opp_table->opp_list;
	int ret;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			/* Remember the last node with a smaller rate. */
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->u_volt, opp->available,
			 new_opp->rate, new_opp->u_volt, new_opp->available);

		return opp->available && new_opp->u_volt == opp->u_volt ?
			0 : -EEXIST;
	}

	new_opp->opp_table = opp_table;
	/* 'head' is the last entry with a lower rate: insert right after it. */
	list_add_rcu(&new_opp->node, head);

	/* Debugfs failure is logged but does not fail the add. */
	ret = opp_debug_create_one(new_opp, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
			__func__, ret);

	/* Keep the OPP but mark it unusable if the regulator can't do it. */
	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	return 0;
}
1041
1042 /**
1043 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
1044 * @dev: device for which we do this operation
1045 * @freq: Frequency in Hz for this OPP
1046 * @u_volt: Voltage in uVolts for this OPP
1047 * @dynamic: Dynamically added OPPs.
1048 *
1049 * This function adds an opp definition to the opp table and returns status.
1050 * The opp is made available by default and it can be controlled using
1051 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
1052 *
1053 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
1054 * and freed by dev_pm_opp_of_remove_table.
1055 *
1056 * Locking: The internal opp_table and opp structures are RCU protected.
1057 * Hence this function internally uses RCU updater strategy with mutex locks
1058 * to keep the integrity of the internal data structures. Callers should ensure
1059 * that this function is *NOT* called under RCU protection or in contexts where
1060 * mutex cannot be locked.
1061 *
1062 * Return:
1063 * 0 On success OR
1064 * Duplicate OPPs (both freq and volt are same) and opp->available
1065 * -EEXIST Freq are same and volt are different OR
1066 * Duplicate OPPs (both freq and volt are same) and !opp->available
1067 * -ENOMEM Memory allocation failure
1068 */
static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
		       bool dynamic)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp;
	unsigned long tol;	/* voltage tolerance (uV) from the v1 binding */
	int ret;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	/* Allocates the OPP and finds/creates the table for 'dev' */
	new_opp = _allocate_opp(dev, &opp_table);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* populate the opp table */
	new_opp->rate = freq;
	/*
	 * V1 bindings carry a single voltage; derive the min/max range from
	 * the table-wide percentage tolerance.
	 */
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->u_volt = u_volt;
	new_opp->u_volt_min = u_volt - tol;
	new_opp->u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	/* Link the OPP into the table; fails on duplicates (-EEXIST) */
	ret = _opp_add(dev, new_opp, opp_table);
	if (ret)
		goto free_opp;

	mutex_unlock(&opp_table_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.  Done after dropping the lock, since
	 * notifier callbacks may take it themselves.
	 */
	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_remove(opp_table, new_opp, false);
unlock:
	mutex_unlock(&opp_table_lock);
	return ret;
}
1114
/*
 * TODO: Support multiple regulators.
 *
 * Parse the optional "opp-microvolt[-<prop_name>]" and
 * "opp-microamp[-<prop_name>]" properties of @opp's DT node into @opp's
 * voltage/current fields.  Returns 0 on success (including when the
 * properties are absent), or a negative error code for malformed entries.
 */
static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
			      struct opp_table *opp_table)
{
	u32 microvolt[3] = {0};	/* either <target> or <target min max> */
	u32 val;
	int count, ret;
	struct property *prop = NULL;
	char name[NAME_MAX];

	/* Search for "opp-microvolt-<name>" */
	if (opp_table->prop_name) {
		snprintf(name, sizeof(name), "opp-microvolt-%s",
			 opp_table->prop_name);
		prop = of_find_property(opp->np, name, NULL);
	}

	if (!prop) {
		/* Search for "opp-microvolt" */
		sprintf(name, "opp-microvolt");
		prop = of_find_property(opp->np, name, NULL);

		/* Missing property isn't a problem, but an invalid entry is */
		if (!prop)
			return 0;
	}

	count = of_property_count_u32_elems(opp->np, name);
	if (count < 0) {
		dev_err(dev, "%s: Invalid %s property (%d)\n",
			__func__, name, count);
		return count;
	}

	/* There can be one or three elements here */
	if (count != 1 && count != 3) {
		dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
			__func__, name, count);
		return -EINVAL;
	}

	ret = of_property_read_u32_array(opp->np, name, microvolt, count);
	if (ret) {
		dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
		return -EINVAL;
	}

	opp->u_volt = microvolt[0];

	/* A single element means target == min == max */
	if (count == 1) {
		opp->u_volt_min = opp->u_volt;
		opp->u_volt_max = opp->u_volt;
	} else {
		opp->u_volt_min = microvolt[1];
		opp->u_volt_max = microvolt[2];
	}

	/* Search for "opp-microamp-<name>" */
	prop = NULL;
	if (opp_table->prop_name) {
		snprintf(name, sizeof(name), "opp-microamp-%s",
			 opp_table->prop_name);
		prop = of_find_property(opp->np, name, NULL);
	}

	if (!prop) {
		/* Search for "opp-microamp" */
		sprintf(name, "opp-microamp");
		prop = of_find_property(opp->np, name, NULL);
	}

	/* Current draw is optional as well */
	if (prop && !of_property_read_u32(opp->np, name, &val))
		opp->u_amp = val;

	return 0;
}
1191
1192 /**
1193 * dev_pm_opp_set_supported_hw() - Set supported platforms
1194 * @dev: Device for which supported-hw has to be set.
1195 * @versions: Array of hierarchy of versions to match.
1196 * @count: Number of elements in the array.
1197 *
1198 * This is required only for the V2 bindings, and it enables a platform to
1199 * specify the hierarchy of versions it supports. OPP layer will then enable
1200 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
1201 * property.
1202 *
1203 * Locking: The internal opp_table and opp structures are RCU protected.
1204 * Hence this function internally uses RCU updater strategy with mutex locks
1205 * to keep the integrity of the internal data structures. Callers should ensure
1206 * that this function is *NOT* called under RCU protection or in contexts where
1207 * mutex cannot be locked.
1208 */
int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
				unsigned int count)
{
	struct opp_table *opp_table;
	int ret = 0;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	/* Find the table for 'dev', creating it if it doesn't exist yet */
	opp_table = _add_opp_table(dev);
	if (!opp_table) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Do we already have a version hierarchy associated with opp_table? */
	if (opp_table->supported_hw) {
		dev_err(dev, "%s: Already have supported hardware list\n",
			__func__);
		ret = -EBUSY;
		goto err;
	}

	/* Keep our own copy; the caller's array may not outlive the table */
	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
					GFP_KERNEL);
	if (!opp_table->supported_hw) {
		ret = -ENOMEM;
		goto err;
	}

	opp_table->supported_hw_count = count;
	mutex_unlock(&opp_table_lock);
	return 0;

err:
	/* Drop the table again if nothing else is holding it */
	_remove_opp_table(opp_table);
unlock:
	mutex_unlock(&opp_table_lock);

	return ret;
}
1253 EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
1254
1255 /**
1256 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
1257 * @dev: Device for which supported-hw has to be put.
1258 *
1259 * This is required only for the V2 bindings, and is called for a matching
1260 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
1261 * will not be freed.
1262 *
1263 * Locking: The internal opp_table and opp structures are RCU protected.
1264 * Hence this function internally uses RCU updater strategy with mutex locks
1265 * to keep the integrity of the internal data structures. Callers should ensure
1266 * that this function is *NOT* called under RCU protection or in contexts where
1267 * mutex cannot be locked.
1268 */
void dev_pm_opp_put_supported_hw(struct device *dev)
{
	struct opp_table *opp_table;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "Failed to find opp_table: %ld\n",
			PTR_ERR(opp_table));
		goto unlock;
	}

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Nothing to release if no version hierarchy was ever registered */
	if (!opp_table->supported_hw) {
		dev_err(dev, "%s: Doesn't have supported hardware list\n",
			__func__);
		goto unlock;
	}

	kfree(opp_table->supported_hw);
	opp_table->supported_hw = NULL;
	opp_table->supported_hw_count = 0;

	/* Try freeing opp_table if this was the last blocking resource */
	_remove_opp_table(opp_table);

unlock:
	mutex_unlock(&opp_table_lock);
}
1303 EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
1304
1305 /**
1306 * dev_pm_opp_set_prop_name() - Set prop-extn name
1307 * @dev: Device for which the prop-name has to be set.
1308 * @name: name to postfix to properties.
1309 *
1310 * This is required only for the V2 bindings, and it enables a platform to
1311 * specify the extn to be used for certain property names. The properties to
1312 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
1313 * should postfix the property name with -<name> while looking for them.
1314 *
1315 * Locking: The internal opp_table and opp structures are RCU protected.
1316 * Hence this function internally uses RCU updater strategy with mutex locks
1317 * to keep the integrity of the internal data structures. Callers should ensure
1318 * that this function is *NOT* called under RCU protection or in contexts where
1319 * mutex cannot be locked.
1320 */
int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
	struct opp_table *opp_table;
	int ret = 0;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	/* Find the table for 'dev', creating it if it doesn't exist yet */
	opp_table = _add_opp_table(dev);
	if (!opp_table) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Do we already have a prop-name associated with opp_table? */
	if (opp_table->prop_name) {
		dev_err(dev, "%s: Already have prop-name %s\n", __func__,
			opp_table->prop_name);
		ret = -EBUSY;
		goto err;
	}

	/* Keep our own copy; the caller's string may not outlive the table */
	opp_table->prop_name = kstrdup(name, GFP_KERNEL);
	if (!opp_table->prop_name) {
		ret = -ENOMEM;
		goto err;
	}

	mutex_unlock(&opp_table_lock);
	return 0;

err:
	/* Drop the table again if nothing else is holding it */
	_remove_opp_table(opp_table);
unlock:
	mutex_unlock(&opp_table_lock);

	return ret;
}
1362 EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
1363
1364 /**
1365 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
1366 * @dev: Device for which the prop-name has to be put.
1367 *
1368 * This is required only for the V2 bindings, and is called for a matching
1369 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
1370 * will not be freed.
1371 *
1372 * Locking: The internal opp_table and opp structures are RCU protected.
1373 * Hence this function internally uses RCU updater strategy with mutex locks
1374 * to keep the integrity of the internal data structures. Callers should ensure
1375 * that this function is *NOT* called under RCU protection or in contexts where
1376 * mutex cannot be locked.
1377 */
void dev_pm_opp_put_prop_name(struct device *dev)
{
	struct opp_table *opp_table;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "Failed to find opp_table: %ld\n",
			PTR_ERR(opp_table));
		goto unlock;
	}

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Nothing to release if no prop-name was ever registered */
	if (!opp_table->prop_name) {
		dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
		goto unlock;
	}

	kfree(opp_table->prop_name);
	opp_table->prop_name = NULL;

	/* Try freeing opp_table if this was the last blocking resource */
	_remove_opp_table(opp_table);

unlock:
	mutex_unlock(&opp_table_lock);
}
1410 EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1411
1412 /**
1413 * dev_pm_opp_set_regulator() - Set regulator name for the device
1414 * @dev: Device for which regulator name is being set.
1415 * @name: Name of the regulator.
1416 *
1417 * In order to support OPP switching, OPP layer needs to know the name of the
1418 * device's regulator, as the core would be required to switch voltages as well.
1419 *
1420 * This must be called before any OPPs are initialized for the device.
1421 *
1422 * Locking: The internal opp_table and opp structures are RCU protected.
1423 * Hence this function internally uses RCU updater strategy with mutex locks
1424 * to keep the integrity of the internal data structures. Callers should ensure
1425 * that this function is *NOT* called under RCU protection or in contexts where
1426 * mutex cannot be locked.
1427 */
int dev_pm_opp_set_regulator(struct device *dev, const char *name)
{
	struct opp_table *opp_table;
	struct regulator *reg;
	int ret;

	mutex_lock(&opp_table_lock);

	/* Find the table for 'dev', creating it if it doesn't exist yet */
	opp_table = _add_opp_table(dev);
	if (!opp_table) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Already have a regulator set (field holds ERR_PTR when unset) */
	if (WARN_ON(!IS_ERR(opp_table->regulator))) {
		ret = -EBUSY;
		goto err;
	}
	/* Allocate the regulator */
	reg = regulator_get_optional(dev, name);
	if (IS_ERR(reg)) {
		ret = PTR_ERR(reg);
		/* -EPROBE_DEFER is normal: the regulator may show up later */
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "%s: no regulator (%s) found: %d\n",
				__func__, name, ret);
		goto err;
	}

	opp_table->regulator = reg;

	mutex_unlock(&opp_table_lock);
	return 0;

err:
	/* Drop the table again if nothing else is holding it */
	_remove_opp_table(opp_table);
unlock:
	mutex_unlock(&opp_table_lock);

	return ret;
}
1475 EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
1476
1477 /**
1478 * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
1479 * @dev: Device for which regulator was set.
1480 *
1481 * Locking: The internal opp_table and opp structures are RCU protected.
1482 * Hence this function internally uses RCU updater strategy with mutex locks
1483 * to keep the integrity of the internal data structures. Callers should ensure
1484 * that this function is *NOT* called under RCU protection or in contexts where
1485 * mutex cannot be locked.
1486 */
void dev_pm_opp_put_regulator(struct device *dev)
{
	struct opp_table *opp_table;

	mutex_lock(&opp_table_lock);

	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "Failed to find opp_table: %ld\n",
			PTR_ERR(opp_table));
		goto unlock;
	}

	/* The regulator field holds an ERR_PTR when none was ever set */
	if (IS_ERR(opp_table->regulator)) {
		dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
		goto unlock;
	}

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	regulator_put(opp_table->regulator);
	/* Reset to an ERR_PTR so future IS_ERR() checks read as "unset" */
	opp_table->regulator = ERR_PTR(-ENXIO);

	/* Try freeing opp_table if this was the last blocking resource */
	_remove_opp_table(opp_table);

unlock:
	mutex_unlock(&opp_table_lock);
}
1518 EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
1519
1520 static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
1521 struct device_node *np)
1522 {
1523 unsigned int count = opp_table->supported_hw_count;
1524 u32 version;
1525 int ret;
1526
1527 if (!opp_table->supported_hw)
1528 return true;
1529
1530 while (count--) {
1531 ret = of_property_read_u32_index(np, "opp-supported-hw", count,
1532 &version);
1533 if (ret) {
1534 dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
1535 __func__, count, ret);
1536 return false;
1537 }
1538
1539 /* Both of these are bitwise masks of the versions */
1540 if (!(version & opp_table->supported_hw[count]))
1541 return false;
1542 }
1543
1544 return true;
1545 }
1546
1547 /**
1548 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
1549 * @dev: device for which we do this operation
1550 * @np: device node
1551 *
1552 * This function adds an opp definition to the opp table and returns status. The
1553 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
1554 * removed by dev_pm_opp_remove.
1555 *
1556 * Locking: The internal opp_table and opp structures are RCU protected.
1557 * Hence this function internally uses RCU updater strategy with mutex locks
1558 * to keep the integrity of the internal data structures. Callers should ensure
1559 * that this function is *NOT* called under RCU protection or in contexts where
1560 * mutex cannot be locked.
1561 *
1562 * Return:
1563 * 0 On success OR
1564 * Duplicate OPPs (both freq and volt are same) and opp->available
1565 * -EEXIST Freq are same and volt are different OR
1566 * Duplicate OPPs (both freq and volt are same) and !opp->available
1567 * -ENOMEM Memory allocation failure
1568 * -EINVAL Failed parsing the OPP node
1569 */
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp;
	u64 rate;
	u32 val;
	int ret;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	/* Allocates the OPP and finds/creates the table for 'dev' */
	new_opp = _allocate_opp(dev, &opp_table);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* "opp-hz" is the mandatory property of an OPP node */
	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (ret < 0) {
		dev_err(dev, "%s: opp-hz not found\n", __func__);
		goto free_opp;
	}

	/* Check if the OPP supports hardware's hierarchy of versions or not */
	if (!_opp_is_supported(dev, opp_table, np)) {
		dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
		/* Not an error: ret is still 0, the OPP is silently skipped */
		goto free_opp;
	}

	/*
	 * Rate is defined as an unsigned long in clk API, and so casting
	 * explicitly to its type. Must be fixed once rate is 64 bit
	 * guaranteed in clk API.
	 */
	new_opp->rate = (unsigned long)rate;
	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	new_opp->np = np;
	new_opp->dynamic = false;	/* static OPPs come from DT */
	new_opp->available = true;

	/* Per-OPP transition latency is optional */
	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	/* Parse optional opp-microvolt/opp-microamp properties */
	ret = opp_parse_supplies(new_opp, dev, opp_table);
	if (ret)
		goto free_opp;

	/* Link the OPP into the table; fails on duplicates (-EEXIST) */
	ret = _opp_add(dev, new_opp, opp_table);
	if (ret)
		goto free_opp;

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (opp_table->suspend_opp) {
			/* Only the first opp-suspend entry is honoured */
			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
				 __func__, opp_table->suspend_opp->rate,
				 new_opp->rate);
		} else {
			new_opp->suspend = true;
			opp_table->suspend_opp = new_opp;
		}
	}

	/* Track the worst-case transition latency for the whole table */
	if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
		opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;

	mutex_unlock(&opp_table_lock);

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
		 new_opp->u_volt_min, new_opp->u_volt_max,
		 new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_remove(opp_table, new_opp, false);
unlock:
	mutex_unlock(&opp_table_lock);
	return ret;
}
1657
1658 /**
1659 * dev_pm_opp_add() - Add an OPP table from a table definitions
1660 * @dev: device for which we do this operation
1661 * @freq: Frequency in Hz for this OPP
1662 * @u_volt: Voltage in uVolts for this OPP
1663 *
1664 * This function adds an opp definition to the opp table and returns status.
1665 * The opp is made available by default and it can be controlled using
1666 * dev_pm_opp_enable/disable functions.
1667 *
1668 * Locking: The internal opp_table and opp structures are RCU protected.
1669 * Hence this function internally uses RCU updater strategy with mutex locks
1670 * to keep the integrity of the internal data structures. Callers should ensure
1671 * that this function is *NOT* called under RCU protection or in contexts where
1672 * mutex cannot be locked.
1673 *
1674 * Return:
1675 * 0 On success OR
1676 * Duplicate OPPs (both freq and volt are same) and opp->available
1677 * -EEXIST Freq are same and volt are different OR
1678 * Duplicate OPPs (both freq and volt are same) and !opp->available
1679 * -ENOMEM Memory allocation failure
1680 */
1681 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
1682 {
1683 return _opp_add_v1(dev, freq, u_volt, true);
1684 }
1685 EXPORT_SYMBOL_GPL(dev_pm_opp_add);
1686
1687 /**
1688 * _opp_set_availability() - helper to set the availability of an opp
1689 * @dev: device for which we do this operation
1690 * @freq: OPP frequency to modify availability
1691 * @availability_req: availability status requested for this opp
1692 *
1693 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
1694 * share a common logic which is isolated here.
1695 *
1696 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1697 * copy operation, returns 0 if no modification was done OR modification was
1698 * successful.
1699 *
1700 * Locking: The internal opp_table and opp structures are RCU protected.
1701 * Hence this function internally uses RCU updater strategy with mutex locks to
1702 * keep the integrity of the internal data structures. Callers should ensure
1703 * that this function is *NOT* called under RCU protection or in contexts where
1704 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1705 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated (before taking the lock) */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	mutex_lock(&opp_table_lock);

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		/* Still the -ENODEV sentinel: no OPP at this frequency */
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	/*
	 * RCU copy-update: publish the modified copy, then defer freeing the
	 * old node until current readers are done with it.
	 */
	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&opp_table_lock);
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_ENABLE, new_opp);
	else
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_DISABLE, new_opp);

	return 0;

unlock:
	mutex_unlock(&opp_table_lock);
	/* Error or no-op path: the preallocated copy was never published */
	kfree(new_opp);
	return r;
}
1768
1769 /**
1770 * dev_pm_opp_enable() - Enable a specific OPP
1771 * @dev: device for which we do this operation
1772 * @freq: OPP frequency to enable
1773 *
1774 * Enables a provided opp. If the operation is valid, this returns 0, else the
1775 * corresponding error value. It is meant to be used for users an OPP available
1776 * after being temporarily made unavailable with dev_pm_opp_disable.
1777 *
1778 * Locking: The internal opp_table and opp structures are RCU protected.
1779 * Hence this function indirectly uses RCU and mutex locks to keep the
1780 * integrity of the internal data structures. Callers should ensure that
1781 * this function is *NOT* called under RCU protection or in contexts where
1782 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1783 *
1784 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1785 * copy operation, returns 0 if no modification was done OR modification was
1786 * successful.
1787 */
1788 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
1789 {
1790 return _opp_set_availability(dev, freq, true);
1791 }
1792 EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
1793
1794 /**
1795 * dev_pm_opp_disable() - Disable a specific OPP
1796 * @dev: device for which we do this operation
1797 * @freq: OPP frequency to disable
1798 *
1799 * Disables a provided opp. If the operation is valid, this returns
1800 * 0, else the corresponding error value. It is meant to be a temporary
1801 * control by users to make this OPP not available until the circumstances are
1802 * right to make it available again (with a call to dev_pm_opp_enable).
1803 *
1804 * Locking: The internal opp_table and opp structures are RCU protected.
1805 * Hence this function indirectly uses RCU and mutex locks to keep the
1806 * integrity of the internal data structures. Callers should ensure that
1807 * this function is *NOT* called under RCU protection or in contexts where
1808 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1809 *
1810 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1811 * copy operation, returns 0 if no modification was done OR modification was
1812 * successful.
1813 */
1814 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
1815 {
1816 return _opp_set_availability(dev, freq, false);
1817 }
1818 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
1819
1820 /**
1821 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
1822 * @dev: device pointer used to lookup OPP table.
1823 *
1824 * Return: pointer to notifier head if found, otherwise -ENODEV or
1825 * -EINVAL based on type of error casted as pointer. value must be checked
1826 * with IS_ERR to determine valid pointer or error result.
1827 *
1828 * Locking: This function must be called under rcu_read_lock(). opp_table is a
1829 * RCU protected pointer. The reason for the same is that the opp pointer which
1830 * is returned will remain valid for use with opp_get_{voltage, freq} only while
1831 * under the locked area. The pointer returned must be used prior to unlocking
1832 * with rcu_read_unlock() to maintain the integrity of the pointer.
1833 */
1834 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
1835 {
1836 struct opp_table *opp_table = _find_opp_table(dev);
1837
1838 if (IS_ERR(opp_table))
1839 return ERR_CAST(opp_table); /* matching type */
1840
1841 return &opp_table->srcu_head;
1842 }
1843 EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
1844
1845 #ifdef CONFIG_OF
1846 /**
1847 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
1848 * entries
1849 * @dev: device pointer used to lookup OPP table.
1850 *
1851 * Free OPPs created using static entries present in DT.
1852 *
1853 * Locking: The internal opp_table and opp structures are RCU protected.
1854 * Hence this function indirectly uses RCU updater strategy with mutex locks
1855 * to keep the integrity of the internal data structures. Callers should ensure
1856 * that this function is *NOT* called under RCU protection or in contexts where
1857 * mutex cannot be locked.
1858 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp, *tmp;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	/* Check for existing table for 'dev' */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int error = PTR_ERR(opp_table);

		/* -ENODEV (no table at all) is a legitimate no-op */
		if (error != -ENODEV)
			WARN(1, "%s: opp_table: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
		goto unlock;
	}

	/* Find if opp_table manages a single device */
	if (list_is_singular(&opp_table->dev_list)) {
		/* Free static OPPs; dynamic ones are the caller's to remove */
		list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
			if (!opp->dynamic)
				_opp_remove(opp_table, opp, true);
		}
	} else {
		/* Shared table: only detach this device, keep the OPPs */
		_remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
	}

unlock:
	mutex_unlock(&opp_table_lock);
}
1894 EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
1895
1896 /* Returns opp descriptor node for a device, caller must do of_node_put() */
1897 struct device_node *_of_get_opp_desc_node(struct device *dev)
1898 {
1899 /*
1900 * TODO: Support for multiple OPP tables.
1901 *
1902 * There should be only ONE phandle present in "operating-points-v2"
1903 * property.
1904 */
1905
1906 return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
1907 }
1908
1909 /* Initializes OPP tables based on new bindings */
1910 static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
1911 {
1912 struct device_node *np;
1913 struct opp_table *opp_table;
1914 int ret = 0, count = 0;
1915
1916 mutex_lock(&opp_table_lock);
1917
1918 opp_table = _managed_opp(opp_np);
1919 if (opp_table) {
1920 /* OPPs are already managed */
1921 if (!_add_opp_dev(dev, opp_table))
1922 ret = -ENOMEM;
1923 mutex_unlock(&opp_table_lock);
1924 return ret;
1925 }
1926 mutex_unlock(&opp_table_lock);
1927
1928 /* We have opp-table node now, iterate over it and add OPPs */
1929 for_each_available_child_of_node(opp_np, np) {
1930 count++;
1931
1932 ret = _opp_add_static_v2(dev, np);
1933 if (ret) {
1934 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
1935 ret);
1936 goto free_table;
1937 }
1938 }
1939
1940 /* There should be one of more OPP defined */
1941 if (WARN_ON(!count))
1942 return -ENOENT;
1943
1944 mutex_lock(&opp_table_lock);
1945
1946 opp_table = _find_opp_table(dev);
1947 if (WARN_ON(IS_ERR(opp_table))) {
1948 ret = PTR_ERR(opp_table);
1949 mutex_unlock(&opp_table_lock);
1950 goto free_table;
1951 }
1952
1953 opp_table->np = opp_np;
1954 opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
1955
1956 mutex_unlock(&opp_table_lock);
1957
1958 return 0;
1959
1960 free_table:
1961 dev_pm_opp_of_remove_table(dev);
1962
1963 return ret;
1964 }
1965
1966 /* Initializes OPP tables based on old-deprecated bindings */
1967 static int _of_add_opp_table_v1(struct device *dev)
1968 {
1969 const struct property *prop;
1970 const __be32 *val;
1971 int nr;
1972
1973 prop = of_find_property(dev->of_node, "operating-points", NULL);
1974 if (!prop)
1975 return -ENODEV;
1976 if (!prop->value)
1977 return -ENODATA;
1978
1979 /*
1980 * Each OPP is a set of tuples consisting of frequency and
1981 * voltage like <freq-kHz vol-uV>.
1982 */
1983 nr = prop->length / sizeof(u32);
1984 if (nr % 2) {
1985 dev_err(dev, "%s: Invalid OPP table\n", __func__);
1986 return -EINVAL;
1987 }
1988
1989 val = prop->value;
1990 while (nr) {
1991 unsigned long freq = be32_to_cpup(val++) * 1000;
1992 unsigned long volt = be32_to_cpup(val++);
1993
1994 if (_opp_add_v1(dev, freq, volt, false))
1995 dev_warn(dev, "%s: Failed to add OPP %ld\n",
1996 __func__, freq);
1997 nr -= 2;
1998 }
1999
2000 return 0;
2001 }
2002
2003 /**
2004 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
2005 * @dev: device pointer used to lookup OPP table.
2006 *
2007 * Register the initial OPP table with the OPP library for given device.
2008 *
2009 * Locking: The internal opp_table and opp structures are RCU protected.
2010 * Hence this function indirectly uses RCU updater strategy with mutex locks
2011 * to keep the integrity of the internal data structures. Callers should ensure
2012 * that this function is *NOT* called under RCU protection or in contexts where
2013 * mutex cannot be locked.
2014 *
2015 * Return:
2016 * 0 On success OR
2017 * Duplicate OPPs (both freq and volt are same) and opp->available
2018 * -EEXIST Freq are same and volt are different OR
2019 * Duplicate OPPs (both freq and volt are same) and !opp->available
2020 * -ENOMEM Memory allocation failure
2021 * -ENODEV when 'operating-points' property is not found or is invalid data
2022 * in device node.
2023 * -ENODATA when empty 'operating-points' property is found
2024 * -EINVAL when invalid entries are found in opp-v2 table
2025 */
2026 int dev_pm_opp_of_add_table(struct device *dev)
2027 {
2028 struct device_node *opp_np;
2029 int ret;
2030
2031 /*
2032 * OPPs have two version of bindings now. The older one is deprecated,
2033 * try for the new binding first.
2034 */
2035 opp_np = _of_get_opp_desc_node(dev);
2036 if (!opp_np) {
2037 /*
2038 * Try old-deprecated bindings for backward compatibility with
2039 * older dtbs.
2040 */
2041 return _of_add_opp_table_v1(dev);
2042 }
2043
2044 ret = _of_add_opp_table_v2(dev, opp_np);
2045 of_node_put(opp_np);
2046
2047 return ret;
2048 }
2049 EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
2050 #endif
This page took 0.105556 seconds and 5 git commands to generate.