Commit | Line | Data |
---|---|---|
543d9378 PW |
1 | /* |
2 | * linux/arch/arm/mach-omap2/clock.c | |
3 | * | |
a16e9703 TL |
4 | * Copyright (C) 2005-2008 Texas Instruments, Inc. |
5 | * Copyright (C) 2004-2008 Nokia Corporation | |
543d9378 | 6 | * |
a16e9703 TL |
7 | * Contacts: |
8 | * Richard Woodruff <r-woodruff2@ti.com> | |
543d9378 PW |
9 | * Paul Walmsley |
10 | * | |
543d9378 PW |
11 | * This program is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License version 2 as | |
13 | * published by the Free Software Foundation. | |
14 | */ | |
15 | #undef DEBUG | |
16 | ||
17 | #include <linux/module.h> | |
18 | #include <linux/kernel.h> | |
19 | #include <linux/device.h> | |
20 | #include <linux/list.h> | |
21 | #include <linux/errno.h> | |
22 | #include <linux/delay.h> | |
23 | #include <linux/clk.h> | |
fced80c7 | 24 | #include <linux/io.h> |
fbd3bdb2 | 25 | #include <linux/bitops.h> |
543d9378 | 26 | |
a09e64fb | 27 | #include <mach/clock.h> |
333943ba | 28 | #include <mach/clockdomain.h> |
a09e64fb | 29 | #include <mach/cpu.h> |
543d9378 PW |
30 | #include <asm/div64.h> |
31 | ||
f8de9b2c | 32 | #include <mach/sdrc.h> |
543d9378 PW |
33 | #include "sdrc.h" |
34 | #include "clock.h" | |
35 | #include "prm.h" | |
36 | #include "prm-regbits-24xx.h" | |
37 | #include "cm.h" | |
38 | #include "cm-regbits-24xx.h" | |
39 | #include "cm-regbits-34xx.h" | |
40 | ||
/* Maximum number of 1-us polls omap2_wait_clock_ready() will make */
#define MAX_CLOCK_ENABLE_WAIT		100000

/* DPLL rate rounding: minimum DPLL multiplier, divider values */
#define DPLL_MIN_MULTIPLIER		1
#define DPLL_MIN_DIVIDER		1

/* Possible error results from _dpll_test_mult */
#define DPLL_MULT_UNDERFLOW		-1

/*
 * Scale factor to mitigate roundoff errors in DPLL rate rounding.
 * The higher the scale factor, the greater the risk of arithmetic overflow,
 * but the closer the rounded rate to the target rate.  DPLL_SCALE_FACTOR
 * must be a power of DPLL_SCALE_BASE.
 */
#define DPLL_SCALE_FACTOR		64
#define DPLL_SCALE_BASE			2
#define DPLL_ROUNDING_VAL		((DPLL_SCALE_BASE / 2) * \
					 (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE))

/* DPLL valid Fint frequency band limits - from 34xx TRM Section 4.7.6.2 */
#define DPLL_FINT_BAND1_MIN		750000
#define DPLL_FINT_BAND1_MAX		2100000
#define DPLL_FINT_BAND2_MIN		7500000
#define DPLL_FINT_BAND2_MAX		21000000

/* _dpll_test_fint() return codes */
#define DPLL_FINT_UNDERFLOW		-1
#define DPLL_FINT_INVALID		-2

/* Bitmask of RATE_IN_* flags matching the currently-running chip */
u8 cpu_mask;

/*-------------------------------------------------------------------------
 * OMAP2/3 specific clock functions
 *-------------------------------------------------------------------------*/
439764cc PW |
/**
 * _omap2xxx_clk_commit - commit clock parent/rate changes in hardware
 * @clk: struct clk *
 *
 * If @clk has the DELAYED_APP flag set, meaning that parent/rate changes
 * don't take effect until the VALID_CONFIG bit is written, write the
 * VALID_CONFIG bit and wait for the write to complete.  No return value.
 */
static void _omap2xxx_clk_commit(struct clk *clk)
{
        /* VALID_CONFIG exists only on OMAP24xx; no-op on other chips */
        if (!cpu_is_omap24xx())
                return;

        if (!(clk->flags & DELAYED_APP))
                return;

        prm_write_mod_reg(OMAP24XX_VALID_CONFIG, OMAP24XX_GR_MOD,
                OMAP24XX_PRCM_CLKCFG_CTRL_OFFSET);
        /* OCP barrier: read back so the write has posted before returning */
        prm_read_mod_reg(OMAP24XX_GR_MOD, OMAP24XX_PRCM_CLKCFG_CTRL_OFFSET);
}
98 | ||
/*
 * _dpll_test_fint - test whether an Fint value is valid for the DPLL
 * @clk: DPLL struct clk to test
 * @n: divider value (N) to test
 *
 * Tests whether a particular divider @n will result in a valid DPLL
 * internal clock frequency Fint.  See the 34xx TRM 4.7.6.2 "DPLL Jitter
 * Correction".  Returns 0 if OK, -1 if the enclosing loop can terminate
 * (assuming that it is counting N upwards), or -2 if the enclosing loop
 * should skip to the next iteration (again assuming N is increasing).
 */
static int _dpll_test_fint(struct clk *clk, u8 n)
{
        struct dpll_data *dd;
        long fint;
        int ret = 0;

        dd = clk->dpll_data;

        /* DPLL divider must result in a valid jitter correction val */
        fint = clk->parent->rate / (n + 1);
        if (fint < DPLL_FINT_BAND1_MIN) {

                /* Below band 1: larger N can only lower Fint further */
                pr_debug("rejecting n=%d due to Fint failure, "
                         "lowering max_divider\n", n);
                dd->max_divider = n;
                ret = DPLL_FINT_UNDERFLOW;

        } else if (fint > DPLL_FINT_BAND1_MAX &&
                   fint < DPLL_FINT_BAND2_MIN) {

                /* In the gap between the two valid bands */
                pr_debug("rejecting n=%d due to Fint failure\n", n);
                ret = DPLL_FINT_INVALID;

        } else if (fint > DPLL_FINT_BAND2_MAX) {

                /* Above band 2: need a larger N to bring Fint down */
                pr_debug("rejecting n=%d due to Fint failure, "
                         "boosting min_divider\n", n);
                dd->min_divider = n;
                ret = DPLL_FINT_INVALID;

        }

        return ret;
}
144 | ||
333943ba PW |
145 | /** |
146 | * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk | |
147 | * @clk: OMAP clock struct ptr to use | |
148 | * | |
149 | * Convert a clockdomain name stored in a struct clk 'clk' into a | |
150 | * clockdomain pointer, and save it into the struct clk. Intended to be | |
151 | * called during clk_register(). No return value. | |
152 | */ | |
153 | void omap2_init_clk_clkdm(struct clk *clk) | |
154 | { | |
155 | struct clockdomain *clkdm; | |
156 | ||
157 | if (!clk->clkdm_name) | |
158 | return; | |
159 | ||
160 | clkdm = clkdm_lookup(clk->clkdm_name); | |
161 | if (clkdm) { | |
162 | pr_debug("clock: associated clk %s to clkdm %s\n", | |
163 | clk->name, clk->clkdm_name); | |
164 | clk->clkdm = clkdm; | |
165 | } else { | |
166 | pr_debug("clock: could not associate clk %s to " | |
167 | "clkdm %s\n", clk->name, clk->clkdm_name); | |
168 | } | |
169 | } | |
170 | ||
543d9378 PW |
171 | /** |
172 | * omap2_init_clksel_parent - set a clksel clk's parent field from the hardware | |
173 | * @clk: OMAP clock struct ptr to use | |
174 | * | |
175 | * Given a pointer to a source-selectable struct clk, read the hardware | |
176 | * register and determine what its parent is currently set to. Update the | |
177 | * clk->parent field with the appropriate clk ptr. | |
178 | */ | |
179 | void omap2_init_clksel_parent(struct clk *clk) | |
180 | { | |
181 | const struct clksel *clks; | |
182 | const struct clksel_rate *clkr; | |
183 | u32 r, found = 0; | |
184 | ||
185 | if (!clk->clksel) | |
186 | return; | |
187 | ||
188 | r = __raw_readl(clk->clksel_reg) & clk->clksel_mask; | |
189 | r >>= __ffs(clk->clksel_mask); | |
190 | ||
191 | for (clks = clk->clksel; clks->parent && !found; clks++) { | |
192 | for (clkr = clks->rates; clkr->div && !found; clkr++) { | |
193 | if ((clkr->flags & cpu_mask) && (clkr->val == r)) { | |
194 | if (clk->parent != clks->parent) { | |
195 | pr_debug("clock: inited %s parent " | |
196 | "to %s (was %s)\n", | |
197 | clk->name, clks->parent->name, | |
198 | ((clk->parent) ? | |
199 | clk->parent->name : "NULL")); | |
3f0a820c | 200 | clk_reparent(clk, clks->parent); |
543d9378 PW |
201 | }; |
202 | found = 1; | |
203 | } | |
204 | } | |
205 | } | |
206 | ||
207 | if (!found) | |
208 | printk(KERN_ERR "clock: init parent: could not find " | |
209 | "regval %0x for clock %s\n", r, clk->name); | |
210 | ||
211 | return; | |
212 | } | |
213 | ||
214 | /* Returns the DPLL rate */ | |
215 | u32 omap2_get_dpll_rate(struct clk *clk) | |
216 | { | |
217 | long long dpll_clk; | |
218 | u32 dpll_mult, dpll_div, dpll; | |
88b8ba90 | 219 | struct dpll_data *dd; |
543d9378 PW |
220 | |
221 | dd = clk->dpll_data; | |
222 | /* REVISIT: What do we return on error? */ | |
223 | if (!dd) | |
224 | return 0; | |
225 | ||
226 | dpll = __raw_readl(dd->mult_div1_reg); | |
227 | dpll_mult = dpll & dd->mult_mask; | |
228 | dpll_mult >>= __ffs(dd->mult_mask); | |
229 | dpll_div = dpll & dd->div1_mask; | |
230 | dpll_div >>= __ffs(dd->div1_mask); | |
231 | ||
232 | dpll_clk = (long long)clk->parent->rate * dpll_mult; | |
233 | do_div(dpll_clk, dpll_div + 1); | |
234 | ||
543d9378 PW |
235 | return dpll_clk; |
236 | } | |
237 | ||
238 | /* | |
239 | * Used for clocks that have the same value as the parent clock, | |
240 | * divided by some factor | |
241 | */ | |
242 | void omap2_fixed_divisor_recalc(struct clk *clk) | |
243 | { | |
244 | WARN_ON(!clk->fixed_div); | |
245 | ||
246 | clk->rate = clk->parent->rate / clk->fixed_div; | |
543d9378 PW |
247 | } |
248 | ||
249 | /** | |
250 | * omap2_wait_clock_ready - wait for clock to enable | |
251 | * @reg: physical address of clock IDLEST register | |
252 | * @mask: value to mask against to determine if the clock is active | |
253 | * @name: name of the clock (for printk) | |
254 | * | |
255 | * Returns 1 if the clock enabled in time, or 0 if it failed to enable | |
256 | * in roughly MAX_CLOCK_ENABLE_WAIT microseconds. | |
257 | */ | |
258 | int omap2_wait_clock_ready(void __iomem *reg, u32 mask, const char *name) | |
259 | { | |
260 | int i = 0; | |
261 | int ena = 0; | |
262 | ||
263 | /* | |
264 | * 24xx uses 0 to indicate not ready, and 1 to indicate ready. | |
265 | * 34xx reverses this, just to keep us on our toes | |
266 | */ | |
fecb494b | 267 | if (cpu_mask & (RATE_IN_242X | RATE_IN_243X)) |
543d9378 | 268 | ena = mask; |
fecb494b | 269 | else if (cpu_mask & RATE_IN_343X) |
543d9378 | 270 | ena = 0; |
543d9378 PW |
271 | |
272 | /* Wait for lock */ | |
273 | while (((__raw_readl(reg) & mask) != ena) && | |
274 | (i++ < MAX_CLOCK_ENABLE_WAIT)) { | |
275 | udelay(1); | |
276 | } | |
277 | ||
278 | if (i < MAX_CLOCK_ENABLE_WAIT) | |
279 | pr_debug("Clock %s stable after %d loops\n", name, i); | |
280 | else | |
281 | printk(KERN_ERR "Clock %s didn't enable in %d tries\n", | |
282 | name, MAX_CLOCK_ENABLE_WAIT); | |
283 | ||
284 | ||
285 | return (i < MAX_CLOCK_ENABLE_WAIT) ? 1 : 0; | |
286 | }; | |
287 | ||
288 | ||
/*
 * Note: We don't need special code here for INVERT_ENABLE
 * for the time being since INVERT_ENABLE only applies to clocks enabled by
 * CM_CLKEN_PLL
 */
static void omap2_clk_wait_ready(struct clk *clk)
{
        void __iomem *reg, *other_reg, *st_reg;
        u32 bit;

        /*
         * REVISIT: This code is pretty ugly.  It would be nice to generalize
         * it and pull it into struct clk itself somehow.
         */
        reg = clk->enable_reg;

        /*
         * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes
         * it's just a matter of XORing the bits.
         */
        other_reg = (void __iomem *)((u32)reg ^ (CM_FCLKEN ^ CM_ICLKEN));

        /* Check if both functional and interface clocks
         * are running. */
        bit = 1 << clk->enable_bit;
        if (!(__raw_readl(other_reg) & bit))
                return;
        /*
         * Derive the CM_IDLEST* address: assumes the IDLEST register sits
         * at offset 0x20 within the same CM module as the enable register
         * — NOTE(review): confirm against the PRCM register map.
         */
        st_reg = (void __iomem *)(((u32)other_reg & ~0xf0) | 0x20); /* CM_IDLEST* */

        omap2_wait_clock_ready(st_reg, bit, clk->name);
}
320 | ||
bc51da4e | 321 | static int omap2_dflt_clk_enable(struct clk *clk) |
543d9378 | 322 | { |
ee1eec36 | 323 | u32 v; |
543d9378 | 324 | |
c0fc18c5 | 325 | if (unlikely(clk->enable_reg == NULL)) { |
543d9378 PW |
326 | printk(KERN_ERR "clock.c: Enable for %s without enable code\n", |
327 | clk->name); | |
328 | return 0; /* REVISIT: -EINVAL */ | |
329 | } | |
330 | ||
ee1eec36 | 331 | v = __raw_readl(clk->enable_reg); |
543d9378 | 332 | if (clk->flags & INVERT_ENABLE) |
ee1eec36 | 333 | v &= ~(1 << clk->enable_bit); |
543d9378 | 334 | else |
ee1eec36 PW |
335 | v |= (1 << clk->enable_bit); |
336 | __raw_writel(v, clk->enable_reg); | |
f11fda6a | 337 | v = __raw_readl(clk->enable_reg); /* OCP barrier */ |
543d9378 | 338 | |
543d9378 PW |
339 | return 0; |
340 | } | |
341 | ||
bc51da4e RK |
342 | static int omap2_dflt_clk_enable_wait(struct clk *clk) |
343 | { | |
344 | int ret; | |
345 | ||
fecb494b | 346 | if (!clk->enable_reg) { |
bc51da4e RK |
347 | printk(KERN_ERR "clock.c: Enable for %s without enable code\n", |
348 | clk->name); | |
349 | return 0; /* REVISIT: -EINVAL */ | |
350 | } | |
351 | ||
352 | ret = omap2_dflt_clk_enable(clk); | |
353 | if (ret == 0) | |
354 | omap2_clk_wait_ready(clk); | |
355 | return ret; | |
356 | } | |
357 | ||
b36ee724 | 358 | static void omap2_dflt_clk_disable(struct clk *clk) |
543d9378 | 359 | { |
ee1eec36 | 360 | u32 v; |
543d9378 | 361 | |
fecb494b | 362 | if (!clk->enable_reg) { |
543d9378 PW |
363 | /* |
364 | * 'Independent' here refers to a clock which is not | |
365 | * controlled by its parent. | |
366 | */ | |
367 | printk(KERN_ERR "clock: clk_disable called on independent " | |
368 | "clock %s which has no enable_reg\n", clk->name); | |
369 | return; | |
370 | } | |
371 | ||
ee1eec36 | 372 | v = __raw_readl(clk->enable_reg); |
543d9378 | 373 | if (clk->flags & INVERT_ENABLE) |
ee1eec36 | 374 | v |= (1 << clk->enable_bit); |
543d9378 | 375 | else |
ee1eec36 PW |
376 | v &= ~(1 << clk->enable_bit); |
377 | __raw_writel(v, clk->enable_reg); | |
de07fedd | 378 | /* No OCP barrier needed here since it is a disable operation */ |
543d9378 PW |
379 | } |
380 | ||
/* Default clkops: enable waits for the clock to report ready */
const struct clkops clkops_omap2_dflt_wait = {
        .enable         = omap2_dflt_clk_enable_wait,
        .disable        = omap2_dflt_clk_disable,
};

/* Default clkops: enable returns immediately, no readiness wait */
const struct clkops clkops_omap2_dflt = {
        .enable         = omap2_dflt_clk_enable,
        .disable        = omap2_dflt_clk_disable,
};
390 | ||
b36ee724 RK |
/* Enables clock without considering parent dependencies or use count
 * REVISIT: Maybe change this to use clk->enable like on omap1?
 */
static int _omap2_clk_enable(struct clk *clk)
{
        /* Delegate to the per-clock ops; returns the ops' status code */
        return clk->ops->enable(clk);
}
398 | ||
/* Disables clock without considering parent dependencies or use count */
static void _omap2_clk_disable(struct clk *clk)
{
        /* Delegate to the per-clock ops */
        clk->ops->disable(clk);
}
404 | ||
543d9378 PW |
/*
 * omap2_clk_disable - use-counted disable
 * Only touches hardware on the 1 -> 0 use-count transition; then releases
 * the parent's use count and notifies the clockdomain.
 */
void omap2_clk_disable(struct clk *clk)
{
        if (clk->usecount > 0 && !(--clk->usecount)) {
                _omap2_clk_disable(clk);
                /* Drop the reference this clock held on its parent */
                if (clk->parent)
                        omap2_clk_disable(clk->parent);
                if (clk->clkdm)
                        omap2_clkdm_clk_disable(clk->clkdm, clk);

        }
}
416 | ||
/*
 * omap2_clk_enable - use-counted enable
 * Only touches hardware on the 0 -> 1 use-count transition.  The parent
 * is enabled first, then the clockdomain is notified, then this clock.
 * On failure everything is unwound in reverse order and the use count is
 * rolled back.  Returns 0 on success or the enable ops' error code.
 */
int omap2_clk_enable(struct clk *clk)
{
        int ret = 0;

        if (clk->usecount++ == 0) {
                /* Parent must be running before this clock is enabled */
                if (clk->parent) {
                        ret = omap2_clk_enable(clk->parent);
                        if (ret)
                                goto err;
                }

                if (clk->clkdm)
                        omap2_clkdm_clk_enable(clk->clkdm, clk);

                ret = _omap2_clk_enable(clk);
                if (ret) {
                        /* Unwind in reverse order of the steps above */
                        if (clk->clkdm)
                                omap2_clkdm_clk_disable(clk->clkdm, clk);

                        if (clk->parent)
                                omap2_clk_disable(clk->parent);

                        goto err;
                }
        }
        return ret;

err:
        /* Roll back the optimistic increment from the top */
        clk->usecount--;
        return ret;
}
448 | ||
449 | /* | |
450 | * Used for clocks that are part of CLKSEL_xyz governed clocks. | |
451 | * REVISIT: Maybe change to use clk->enable() functions like on omap1? | |
452 | */ | |
453 | void omap2_clksel_recalc(struct clk *clk) | |
454 | { | |
455 | u32 div = 0; | |
456 | ||
457 | pr_debug("clock: recalc'ing clksel clk %s\n", clk->name); | |
458 | ||
459 | div = omap2_clksel_get_divisor(clk); | |
460 | if (div == 0) | |
461 | return; | |
462 | ||
fecb494b | 463 | if (clk->rate == (clk->parent->rate / div)) |
543d9378 PW |
464 | return; |
465 | clk->rate = clk->parent->rate / div; | |
466 | ||
467 | pr_debug("clock: new clock rate is %ld (div %d)\n", clk->rate, div); | |
543d9378 PW |
468 | } |
469 | ||
470 | /** | |
471 | * omap2_get_clksel_by_parent - return clksel struct for a given clk & parent | |
472 | * @clk: OMAP struct clk ptr to inspect | |
473 | * @src_clk: OMAP struct clk ptr of the parent clk to search for | |
474 | * | |
475 | * Scan the struct clksel array associated with the clock to find | |
476 | * the element associated with the supplied parent clock address. | |
477 | * Returns a pointer to the struct clksel on success or NULL on error. | |
478 | */ | |
fecb494b PW |
479 | static const struct clksel *omap2_get_clksel_by_parent(struct clk *clk, |
480 | struct clk *src_clk) | |
543d9378 PW |
481 | { |
482 | const struct clksel *clks; | |
483 | ||
484 | if (!clk->clksel) | |
485 | return NULL; | |
486 | ||
487 | for (clks = clk->clksel; clks->parent; clks++) { | |
488 | if (clks->parent == src_clk) | |
489 | break; /* Found the requested parent */ | |
490 | } | |
491 | ||
492 | if (!clks->parent) { | |
493 | printk(KERN_ERR "clock: Could not find parent clock %s in " | |
494 | "clksel array of clock %s\n", src_clk->name, | |
495 | clk->name); | |
496 | return NULL; | |
497 | } | |
498 | ||
499 | return clks; | |
500 | } | |
501 | ||
502 | /** | |
503 | * omap2_clksel_round_rate_div - find divisor for the given clock and rate | |
504 | * @clk: OMAP struct clk to use | |
505 | * @target_rate: desired clock rate | |
506 | * @new_div: ptr to where we should store the divisor | |
507 | * | |
508 | * Finds 'best' divider value in an array based on the source and target | |
509 | * rates. The divider array must be sorted with smallest divider first. | |
510 | * Note that this will not work for clocks which are part of CONFIG_PARTICIPANT, | |
511 | * they are only settable as part of virtual_prcm set. | |
512 | * | |
513 | * Returns the rounded clock rate or returns 0xffffffff on error. | |
514 | */ | |
515 | u32 omap2_clksel_round_rate_div(struct clk *clk, unsigned long target_rate, | |
516 | u32 *new_div) | |
517 | { | |
518 | unsigned long test_rate; | |
519 | const struct clksel *clks; | |
520 | const struct clksel_rate *clkr; | |
521 | u32 last_div = 0; | |
522 | ||
523 | printk(KERN_INFO "clock: clksel_round_rate_div: %s target_rate %ld\n", | |
524 | clk->name, target_rate); | |
525 | ||
526 | *new_div = 1; | |
527 | ||
528 | clks = omap2_get_clksel_by_parent(clk, clk->parent); | |
fecb494b | 529 | if (!clks) |
543d9378 PW |
530 | return ~0; |
531 | ||
532 | for (clkr = clks->rates; clkr->div; clkr++) { | |
533 | if (!(clkr->flags & cpu_mask)) | |
534 | continue; | |
535 | ||
536 | /* Sanity check */ | |
537 | if (clkr->div <= last_div) | |
538 | printk(KERN_ERR "clock: clksel_rate table not sorted " | |
539 | "for clock %s", clk->name); | |
540 | ||
541 | last_div = clkr->div; | |
542 | ||
543 | test_rate = clk->parent->rate / clkr->div; | |
544 | ||
545 | if (test_rate <= target_rate) | |
546 | break; /* found it */ | |
547 | } | |
548 | ||
549 | if (!clkr->div) { | |
550 | printk(KERN_ERR "clock: Could not find divisor for target " | |
551 | "rate %ld for clock %s parent %s\n", target_rate, | |
552 | clk->name, clk->parent->name); | |
553 | return ~0; | |
554 | } | |
555 | ||
556 | *new_div = clkr->div; | |
557 | ||
558 | printk(KERN_INFO "clock: new_div = %d, new_rate = %ld\n", *new_div, | |
559 | (clk->parent->rate / clkr->div)); | |
560 | ||
561 | return (clk->parent->rate / clkr->div); | |
562 | } | |
563 | ||
564 | /** | |
565 | * omap2_clksel_round_rate - find rounded rate for the given clock and rate | |
566 | * @clk: OMAP struct clk to use | |
567 | * @target_rate: desired clock rate | |
568 | * | |
569 | * Compatibility wrapper for OMAP clock framework | |
570 | * Finds best target rate based on the source clock and possible dividers. | |
571 | * rates. The divider array must be sorted with smallest divider first. | |
572 | * Note that this will not work for clocks which are part of CONFIG_PARTICIPANT, | |
573 | * they are only settable as part of virtual_prcm set. | |
574 | * | |
575 | * Returns the rounded clock rate or returns 0xffffffff on error. | |
576 | */ | |
577 | long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate) | |
578 | { | |
579 | u32 new_div; | |
580 | ||
581 | return omap2_clksel_round_rate_div(clk, target_rate, &new_div); | |
582 | } | |
583 | ||
584 | ||
585 | /* Given a clock and a rate apply a clock specific rounding function */ | |
586 | long omap2_clk_round_rate(struct clk *clk, unsigned long rate) | |
587 | { | |
fecb494b | 588 | if (clk->round_rate) |
543d9378 PW |
589 | return clk->round_rate(clk, rate); |
590 | ||
591 | if (clk->flags & RATE_FIXED) | |
592 | printk(KERN_ERR "clock: generic omap2_clk_round_rate called " | |
593 | "on fixed-rate clock %s\n", clk->name); | |
594 | ||
595 | return clk->rate; | |
596 | } | |
597 | ||
598 | /** | |
599 | * omap2_clksel_to_divisor() - turn clksel field value into integer divider | |
600 | * @clk: OMAP struct clk to use | |
601 | * @field_val: register field value to find | |
602 | * | |
603 | * Given a struct clk of a rate-selectable clksel clock, and a register field | |
604 | * value to search for, find the corresponding clock divisor. The register | |
605 | * field value should be pre-masked and shifted down so the LSB is at bit 0 | |
606 | * before calling. Returns 0 on error | |
607 | */ | |
608 | u32 omap2_clksel_to_divisor(struct clk *clk, u32 field_val) | |
609 | { | |
610 | const struct clksel *clks; | |
611 | const struct clksel_rate *clkr; | |
612 | ||
613 | clks = omap2_get_clksel_by_parent(clk, clk->parent); | |
fecb494b | 614 | if (!clks) |
543d9378 PW |
615 | return 0; |
616 | ||
617 | for (clkr = clks->rates; clkr->div; clkr++) { | |
618 | if ((clkr->flags & cpu_mask) && (clkr->val == field_val)) | |
619 | break; | |
620 | } | |
621 | ||
622 | if (!clkr->div) { | |
623 | printk(KERN_ERR "clock: Could not find fieldval %d for " | |
624 | "clock %s parent %s\n", field_val, clk->name, | |
625 | clk->parent->name); | |
626 | return 0; | |
627 | } | |
628 | ||
629 | return clkr->div; | |
630 | } | |
631 | ||
632 | /** | |
633 | * omap2_divisor_to_clksel() - turn clksel integer divisor into a field value | |
634 | * @clk: OMAP struct clk to use | |
635 | * @div: integer divisor to search for | |
636 | * | |
637 | * Given a struct clk of a rate-selectable clksel clock, and a clock divisor, | |
638 | * find the corresponding register field value. The return register value is | |
639 | * the value before left-shifting. Returns 0xffffffff on error | |
640 | */ | |
641 | u32 omap2_divisor_to_clksel(struct clk *clk, u32 div) | |
642 | { | |
643 | const struct clksel *clks; | |
644 | const struct clksel_rate *clkr; | |
645 | ||
646 | /* should never happen */ | |
647 | WARN_ON(div == 0); | |
648 | ||
649 | clks = omap2_get_clksel_by_parent(clk, clk->parent); | |
fecb494b | 650 | if (!clks) |
543d9378 PW |
651 | return 0; |
652 | ||
653 | for (clkr = clks->rates; clkr->div; clkr++) { | |
654 | if ((clkr->flags & cpu_mask) && (clkr->div == div)) | |
655 | break; | |
656 | } | |
657 | ||
658 | if (!clkr->div) { | |
659 | printk(KERN_ERR "clock: Could not find divisor %d for " | |
660 | "clock %s parent %s\n", div, clk->name, | |
661 | clk->parent->name); | |
662 | return 0; | |
663 | } | |
664 | ||
665 | return clkr->val; | |
666 | } | |
667 | ||
543d9378 PW |
668 | /** |
669 | * omap2_clksel_get_divisor - get current divider applied to parent clock. | |
670 | * @clk: OMAP struct clk to use. | |
671 | * | |
672 | * Returns the integer divisor upon success or 0 on error. | |
673 | */ | |
674 | u32 omap2_clksel_get_divisor(struct clk *clk) | |
675 | { | |
ee1eec36 | 676 | u32 v; |
543d9378 | 677 | |
ee1eec36 | 678 | if (!clk->clksel_mask) |
543d9378 PW |
679 | return 0; |
680 | ||
ee1eec36 PW |
681 | v = __raw_readl(clk->clksel_reg) & clk->clksel_mask; |
682 | v >>= __ffs(clk->clksel_mask); | |
543d9378 | 683 | |
ee1eec36 | 684 | return omap2_clksel_to_divisor(clk, v); |
543d9378 PW |
685 | } |
686 | ||
/*
 * omap2_clksel_set_rate - program a clksel clock to the given exact rate
 * Returns 0 on success, or -EINVAL if the clock is not a clksel clock or
 * @rate cannot be produced exactly from the parent rate.
 */
int omap2_clksel_set_rate(struct clk *clk, unsigned long rate)
{
        u32 v, field_val, validrate, new_div = 0;

        if (!clk->clksel_mask)
                return -EINVAL;

        /* Only exact rates are programmed; rounding is the caller's job */
        validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
        if (validrate != rate)
                return -EINVAL;

        field_val = omap2_divisor_to_clksel(clk, new_div);
        if (field_val == ~0)
                return -EINVAL;

        /* Read-modify-write the divider-select field */
        v = __raw_readl(clk->clksel_reg);
        v &= ~clk->clksel_mask;
        v |= field_val << __ffs(clk->clksel_mask);
        __raw_writel(v, clk->clksel_reg);
        v = __raw_readl(clk->clksel_reg); /* OCP barrier */

        clk->rate = clk->parent->rate / new_div;

        /* On 24xx DELAYED_APP clocks, latch the change via VALID_CONFIG */
        _omap2xxx_clk_commit(clk);

        return 0;
}
714 | ||
715 | ||
716 | /* Set the clock rate for a clock source */ | |
717 | int omap2_clk_set_rate(struct clk *clk, unsigned long rate) | |
718 | { | |
719 | int ret = -EINVAL; | |
720 | ||
721 | pr_debug("clock: set_rate for clock %s to rate %ld\n", clk->name, rate); | |
722 | ||
723 | /* CONFIG_PARTICIPANT clocks are changed only in sets via the | |
724 | rate table mechanism, driven by mpu_speed */ | |
725 | if (clk->flags & CONFIG_PARTICIPANT) | |
726 | return -EINVAL; | |
727 | ||
728 | /* dpll_ck, core_ck, virt_prcm_set; plus all clksel clocks */ | |
fecb494b | 729 | if (clk->set_rate) |
543d9378 PW |
730 | ret = clk->set_rate(clk, rate); |
731 | ||
543d9378 PW |
732 | return ret; |
733 | } | |
734 | ||
735 | /* | |
736 | * Converts encoded control register address into a full address | |
ee1eec36 | 737 | * On error, the return value (parent_div) will be 0. |
543d9378 | 738 | */ |
ee1eec36 PW |
739 | static u32 _omap2_clksel_get_src_field(struct clk *src_clk, struct clk *clk, |
740 | u32 *field_val) | |
543d9378 PW |
741 | { |
742 | const struct clksel *clks; | |
743 | const struct clksel_rate *clkr; | |
744 | ||
543d9378 | 745 | clks = omap2_get_clksel_by_parent(clk, src_clk); |
fecb494b | 746 | if (!clks) |
543d9378 PW |
747 | return 0; |
748 | ||
749 | for (clkr = clks->rates; clkr->div; clkr++) { | |
750 | if (clkr->flags & (cpu_mask | DEFAULT_RATE)) | |
751 | break; /* Found the default rate for this platform */ | |
752 | } | |
753 | ||
754 | if (!clkr->div) { | |
755 | printk(KERN_ERR "clock: Could not find default rate for " | |
756 | "clock %s parent %s\n", clk->name, | |
757 | src_clk->parent->name); | |
758 | return 0; | |
759 | } | |
760 | ||
761 | /* Should never happen. Add a clksel mask to the struct clk. */ | |
762 | WARN_ON(clk->clksel_mask == 0); | |
763 | ||
ee1eec36 | 764 | *field_val = clkr->val; |
543d9378 | 765 | |
ee1eec36 | 766 | return clkr->div; |
543d9378 PW |
767 | } |
768 | ||
/*
 * omap2_clk_set_parent - switch a clksel clock to a new parent
 * The clock is disabled around the register write if it is in use, then
 * re-enabled; rate is recomputed from the new parent.  Returns 0 on
 * success or -EINVAL if @clk is not reparentable or @new_parent is not a
 * valid source for it.
 */
int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
{
        u32 field_val, v, parent_div;

        if (clk->flags & CONFIG_PARTICIPANT)
                return -EINVAL;

        if (!clk->clksel)
                return -EINVAL;

        parent_div = _omap2_clksel_get_src_field(new_parent, clk, &field_val);
        if (!parent_div)
                return -EINVAL;

        /* Clock must be idle while its source mux is switched */
        if (clk->usecount > 0)
                _omap2_clk_disable(clk);

        /* Set new source value (previous dividers if any in effect) */
        v = __raw_readl(clk->clksel_reg);
        v &= ~clk->clksel_mask;
        v |= field_val << __ffs(clk->clksel_mask);
        __raw_writel(v, clk->clksel_reg);
        v = __raw_readl(clk->clksel_reg); /* OCP barrier */

        /* On 24xx DELAYED_APP clocks, latch the change via VALID_CONFIG */
        _omap2xxx_clk_commit(clk);

        if (clk->usecount > 0)
                _omap2_clk_enable(clk);

        clk_reparent(clk, new_parent);

        /* CLKSEL clocks follow their parents' rates, divided by a divisor */
        clk->rate = new_parent->rate;

        if (parent_div > 0)
                clk->rate /= parent_div;

        pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
                 clk->name, clk->parent->name, clk->rate);

        return 0;
}
811 | ||
88b8ba90 PW |
812 | /* DPLL rate rounding code */ |
813 | ||
814 | /** | |
815 | * omap2_dpll_set_rate_tolerance: set the error tolerance during rate rounding | |
816 | * @clk: struct clk * of the DPLL | |
817 | * @tolerance: maximum rate error tolerance | |
818 | * | |
819 | * Set the maximum DPLL rate error tolerance for the rate rounding | |
820 | * algorithm. The rate tolerance is an attempt to balance DPLL power | |
821 | * saving (the least divider value "n") vs. rate fidelity (the least | |
822 | * difference between the desired DPLL target rate and the rounded | |
823 | * rate out of the algorithm). So, increasing the tolerance is likely | |
824 | * to decrease DPLL power consumption and increase DPLL rate error. | |
825 | * Returns -EINVAL if provided a null clock ptr or a clk that is not a | |
826 | * DPLL; or 0 upon success. | |
827 | */ | |
828 | int omap2_dpll_set_rate_tolerance(struct clk *clk, unsigned int tolerance) | |
829 | { | |
830 | if (!clk || !clk->dpll_data) | |
831 | return -EINVAL; | |
832 | ||
833 | clk->dpll_data->rate_tolerance = tolerance; | |
834 | ||
835 | return 0; | |
836 | } | |
837 | ||
/* Compute parent_rate * m / n with a 64-bit intermediate product */
static unsigned long _dpll_compute_new_rate(unsigned long parent_rate,
                                            unsigned int m, unsigned int n)
{
        unsigned long long rate = (unsigned long long)parent_rate * m;

        do_div(rate, n);

        return rate;
}
847 | ||
/*
 * _dpll_test_mult - test a DPLL multiplier value
 * @m: pointer to the DPLL m (multiplier) value under test
 * @n: current DPLL n (divider) value under test
 * @new_rate: pointer to storage for the resulting rounded rate
 * @target_rate: the desired DPLL rate
 * @parent_rate: the DPLL's parent clock rate
 *
 * This code tests a DPLL multiplier value, ensuring that the
 * resulting rate will not be higher than the target_rate, and that
 * the multiplier value itself is valid for the DPLL.  Initially, the
 * integer pointed to by the m argument should be prescaled by
 * multiplying by DPLL_SCALE_FACTOR.  The code will replace this with
 * a non-scaled m upon return.  This non-scaled m will result in a
 * new_rate as close as possible to target_rate (but not greater than
 * target_rate) given the current (parent_rate, n, prescaled m)
 * triple.  Returns DPLL_MULT_UNDERFLOW in the event that the
 * non-scaled m attempted to underflow, which can allow the calling
 * function to bail out early; or 0 upon success.
 */
static int _dpll_test_mult(int *m, int n, unsigned long *new_rate,
                           unsigned long target_rate,
                           unsigned long parent_rate)
{
        int r = 0, carry = 0;

        /* Unscale m and round if necessary */
        if (*m % DPLL_SCALE_FACTOR >= DPLL_ROUNDING_VAL)
                carry = 1;
        *m = (*m / DPLL_SCALE_FACTOR) + carry;

        /*
         * The new rate must be <= the target rate to avoid programming
         * a rate that is impossible for the hardware to handle
         */
        *new_rate = _dpll_compute_new_rate(parent_rate, *m, n);
        if (*new_rate > target_rate) {
                /* Too fast: back m off by one and recompute below */
                (*m)--;
                *new_rate = 0;
        }

        /* Guard against m underflow */
        if (*m < DPLL_MIN_MULTIPLIER) {
                *m = DPLL_MIN_MULTIPLIER;
                *new_rate = 0;
                r = DPLL_MULT_UNDERFLOW;
        }

        /* Recompute if m was adjusted by either clamp above */
        if (*new_rate == 0)
                *new_rate = _dpll_compute_new_rate(parent_rate, *m, n);

        return r;
}
901 | ||
902 | /** | |
903 | * omap2_dpll_round_rate - round a target rate for an OMAP DPLL | |
904 | * @clk: struct clk * for a DPLL | |
905 | * @target_rate: desired DPLL clock rate | |
906 | * | |
907 | * Given a DPLL, a desired target rate, and a rate tolerance, round | |
908 | * the target rate to a possible, programmable rate for this DPLL. | |
909 | * Rate tolerance is assumed to be set by the caller before this | |
910 | * function is called. Attempts to select the minimum possible n | |
911 | * within the tolerance to reduce power consumption. Stores the | |
912 | * computed (m, n) in the DPLL's dpll_data structure so set_rate() | |
913 | * will not need to call this (expensive) function again. Returns ~0 | |
914 | * if the target rate cannot be rounded, either because the rate is | |
915 | * too low or because the rate tolerance is set too tightly; or the | |
916 | * rounded rate upon success. | |
917 | */ | |
918 | long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate) | |
919 | { | |
920 | int m, n, r, e, scaled_max_m; | |
921 | unsigned long scaled_rt_rp, new_rate; | |
922 | int min_e = -1, min_e_m = -1, min_e_n = -1; | |
b3245040 | 923 | struct dpll_data *dd; |
88b8ba90 PW |
924 | |
925 | if (!clk || !clk->dpll_data) | |
926 | return ~0; | |
927 | ||
b3245040 PW |
928 | dd = clk->dpll_data; |
929 | ||
88b8ba90 PW |
930 | pr_debug("clock: starting DPLL round_rate for clock %s, target rate " |
931 | "%ld\n", clk->name, target_rate); | |
932 | ||
933 | scaled_rt_rp = target_rate / (clk->parent->rate / DPLL_SCALE_FACTOR); | |
b3245040 | 934 | scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR; |
88b8ba90 | 935 | |
b3245040 | 936 | dd->last_rounded_rate = 0; |
88b8ba90 | 937 | |
95f538ac PW |
938 | for (n = dd->min_divider; n <= dd->max_divider; n++) { |
939 | ||
940 | /* Is the (input clk, divider) pair valid for the DPLL? */ | |
941 | r = _dpll_test_fint(clk, n); | |
942 | if (r == DPLL_FINT_UNDERFLOW) | |
943 | break; | |
944 | else if (r == DPLL_FINT_INVALID) | |
945 | continue; | |
88b8ba90 PW |
946 | |
947 | /* Compute the scaled DPLL multiplier, based on the divider */ | |
948 | m = scaled_rt_rp * n; | |
949 | ||
950 | /* | |
85a5f78d PW |
951 | * Since we're counting n up, a m overflow means we |
952 | * can bail out completely (since as n increases in | |
953 | * the next iteration, there's no way that m can | |
954 | * increase beyond the current m) | |
88b8ba90 PW |
955 | */ |
956 | if (m > scaled_max_m) | |
85a5f78d | 957 | break; |
88b8ba90 PW |
958 | |
959 | r = _dpll_test_mult(&m, n, &new_rate, target_rate, | |
960 | clk->parent->rate); | |
961 | ||
85a5f78d PW |
962 | /* m can't be set low enough for this n - try with a larger n */ |
963 | if (r == DPLL_MULT_UNDERFLOW) | |
964 | continue; | |
965 | ||
88b8ba90 PW |
966 | e = target_rate - new_rate; |
967 | pr_debug("clock: n = %d: m = %d: rate error is %d " | |
968 | "(new_rate = %ld)\n", n, m, e, new_rate); | |
969 | ||
970 | if (min_e == -1 || | |
b3245040 | 971 | min_e >= (int)(abs(e) - dd->rate_tolerance)) { |
88b8ba90 PW |
972 | min_e = e; |
973 | min_e_m = m; | |
974 | min_e_n = n; | |
975 | ||
976 | pr_debug("clock: found new least error %d\n", min_e); | |
88b8ba90 | 977 | |
85a5f78d | 978 | /* We found good settings -- bail out now */ |
95f538ac | 979 | if (min_e <= dd->rate_tolerance) |
85a5f78d PW |
980 | break; |
981 | } | |
88b8ba90 PW |
982 | } |
983 | ||
984 | if (min_e < 0) { | |
985 | pr_debug("clock: error: target rate or tolerance too low\n"); | |
986 | return ~0; | |
987 | } | |
988 | ||
b3245040 PW |
989 | dd->last_rounded_m = min_e_m; |
990 | dd->last_rounded_n = min_e_n; | |
991 | dd->last_rounded_rate = _dpll_compute_new_rate(clk->parent->rate, | |
992 | min_e_m, min_e_n); | |
88b8ba90 PW |
993 | |
994 | pr_debug("clock: final least error: e = %d, m = %d, n = %d\n", | |
995 | min_e, min_e_m, min_e_n); | |
996 | pr_debug("clock: final rate: %ld (target rate: %ld)\n", | |
b3245040 | 997 | dd->last_rounded_rate, target_rate); |
88b8ba90 | 998 | |
b3245040 | 999 | return dd->last_rounded_rate; |
88b8ba90 PW |
1000 | } |
1001 | ||
543d9378 PW |
1002 | /*------------------------------------------------------------------------- |
1003 | * Omap2 clock reset and init functions | |
1004 | *-------------------------------------------------------------------------*/ | |
1005 | ||
#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * omap2_clk_disable_unused - turn off a clock with no remaining users
 * @clk: struct clk * to disable
 *
 * Read the clock's enable register; if the hardware already shows the
 * clock disabled (taking INVERT_ENABLE polarity into account), do
 * nothing.  Otherwise switch it off: on OMAP34xx the clock is run
 * through the full omap2_clk_enable()/omap2_clk_disable() path
 * (presumably to keep parent/clockdomain state consistent - confirm
 * against those helpers); on other chips it is disabled directly at
 * the register level.
 */
void omap2_clk_disable_unused(struct clk *clk)
{
	u32 v, regval32;

	v = (clk->flags & INVERT_ENABLE) ? (1 << clk->enable_bit) : 0;

	regval32 = __raw_readl(clk->enable_reg);
	if ((regval32 & (1 << clk->enable_bit)) == v)
		return;

	printk(KERN_INFO "Disabling unused clock \"%s\"\n", clk->name);
	if (cpu_is_omap34xx()) {
		omap2_clk_enable(clk);
		omap2_clk_disable(clk);
	} else {
		_omap2_clk_disable(clk);
	}
}
#endif