Commit | Line | Data |
---|---|---|
253b0887 PM |
1 | #include <linux/clk.h> |
2 | #include <linux/compiler.h> | |
4c7eb4eb | 3 | #include <linux/slab.h> |
6881e8bf | 4 | #include <linux/io.h> |
253b0887 PM |
5 | #include <asm/clock.h> |
6 | ||
6881e8bf MD |
7 | static int sh_clk_mstp32_enable(struct clk *clk) |
8 | { | |
9 | __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit), | |
10 | clk->enable_reg); | |
11 | return 0; | |
12 | } | |
13 | ||
14 | static void sh_clk_mstp32_disable(struct clk *clk) | |
15 | { | |
16 | __raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit), | |
17 | clk->enable_reg); | |
18 | } | |
19 | ||
/*
 * MSTP32 clocks are pure gates: they toggle a single stop bit and
 * otherwise follow their parent's rate (followparent_recalc).
 */
static struct clk_ops sh_clk_mstp32_clk_ops = {
	.enable = sh_clk_mstp32_enable,
	.disable = sh_clk_mstp32_disable,
	.recalc = followparent_recalc,
};
25 | ||
26 | int __init sh_clk_mstp32_register(struct clk *clks, int nr) | |
27 | { | |
28 | struct clk *clkp; | |
29 | int ret = 0; | |
30 | int k; | |
31 | ||
32 | for (k = 0; !ret && (k < nr); k++) { | |
33 | clkp = clks + k; | |
34 | clkp->ops = &sh_clk_mstp32_clk_ops; | |
35 | ret |= clk_register(clkp); | |
36 | } | |
37 | ||
38 | return ret; | |
39 | } | |
40 | ||
2693e274 MD |
/*
 * Shared round_rate helper for the divider (DIV4/DIV6) clocks: snap
 * the requested rate to the clock's pre-built frequency table.
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
45 | ||
/*
 * DIV6 divisors: the 6-bit hardware field value N selects divisor
 * N + 1, i.e. division ratios 1..64.
 */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
52 | ||
/* DIV6 table: divisors only, no multipliers. */
static struct clk_div_mult_table sh_clk_div6_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
57 | ||
/*
 * Recalculate a DIV6 clock's rate: rebuild the frequency table from
 * the current parent rate, then index it with the divisor field held
 * in the low 6 bits of the control register.
 */
static unsigned long sh_clk_div6_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = &sh_clk_div6_table;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	idx = __raw_readl(clk->enable_reg) & 0x003f;

	return clk->freq_table[idx].frequency;
}
70 | ||
098dee99 MD |
71 | static int sh_clk_div6_set_rate(struct clk *clk, |
72 | unsigned long rate, int algo_id) | |
73 | { | |
74 | unsigned long value; | |
75 | int idx; | |
76 | ||
77 | idx = clk_rate_table_find(clk, clk->freq_table, rate); | |
78 | if (idx < 0) | |
79 | return idx; | |
80 | ||
81 | value = __raw_readl(clk->enable_reg); | |
82 | value &= ~0x3f; | |
83 | value |= idx; | |
84 | __raw_writel(value, clk->enable_reg); | |
85 | return 0; | |
86 | } | |
87 | ||
/*
 * Enable a DIV6 clock.  The divisor is (re)programmed first, so the
 * clock starts at clk->rate; only then is the stop bit (bit 8)
 * cleared to let the clock run.  Returns the set_rate error without
 * touching the stop bit if no matching rate exists.
 */
static int sh_clk_div6_enable(struct clk *clk)
{
	unsigned long value;
	int ret;

	ret = sh_clk_div6_set_rate(clk, clk->rate, 0);
	if (ret == 0) {
		value = __raw_readl(clk->enable_reg);
		value &= ~0x100; /* clear stop bit to enable clock */
		__raw_writel(value, clk->enable_reg);
	}
	return ret;
}
101 | ||
/*
 * Disable a DIV6 clock by setting its stop bit.  The divisor field is
 * deliberately forced to all-ones at the same time; sh_clk_div6_enable()
 * reprograms it before restarting the clock.
 */
static void sh_clk_div6_disable(struct clk *clk)
{
	unsigned long value;

	value = __raw_readl(clk->enable_reg);
	value |= 0x100; /* stop clock */
	value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
	__raw_writel(value, clk->enable_reg);
}
111 | ||
2693e274 MD |
/* DIV6 clocks: gated, with a runtime-programmable 1..64 divider. */
static struct clk_ops sh_clk_div6_clk_ops = {
	.recalc = sh_clk_div6_recalc,
	.round_rate = sh_clk_div_round_rate,
	.set_rate = sh_clk_div6_set_rate,
	.enable = sh_clk_div6_enable,
	.disable = sh_clk_div6_disable,
};
119 | ||
120 | int __init sh_clk_div6_register(struct clk *clks, int nr) | |
121 | { | |
122 | struct clk *clkp; | |
123 | void *freq_table; | |
124 | int nr_divs = sh_clk_div6_table.nr_divisors; | |
125 | int freq_table_size = sizeof(struct cpufreq_frequency_table); | |
126 | int ret = 0; | |
127 | int k; | |
128 | ||
129 | freq_table_size *= (nr_divs + 1); | |
4c7eb4eb MD |
130 | freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL); |
131 | if (!freq_table) { | |
132 | pr_err("sh_clk_div6_register: unable to alloc memory\n"); | |
2693e274 | 133 | return -ENOMEM; |
4c7eb4eb | 134 | } |
2693e274 MD |
135 | |
136 | for (k = 0; !ret && (k < nr); k++) { | |
137 | clkp = clks + k; | |
138 | ||
139 | clkp->ops = &sh_clk_div6_clk_ops; | |
140 | clkp->id = -1; | |
141 | clkp->freq_table = freq_table + (k * freq_table_size); | |
142 | clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END; | |
143 | ||
144 | ret = clk_register(clkp); | |
145 | } | |
146 | ||
147 | return ret; | |
148 | } | |
149 | ||
a1153e27 MD |
/*
 * Recalculate a DIV4 clock's rate: rebuild the frequency table from
 * the per-clock div4 table (passing &clk->arch_flags through to the
 * builder, unlike DIV6 which passes NULL), then index it with the
 * 4-bit divisor field found at clk->enable_bit in the register.
 */
static unsigned long sh_clk_div4_recalc(struct clk *clk)
{
	struct clk_div4_table *d4t = clk->priv;
	struct clk_div_mult_table *table = d4t->div_mult_table;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;

	return clk->freq_table[idx].frequency;
}
163 | ||
31c3af50 GL |
/*
 * Reparent a DIV4 clock.  Bit 7 of the control register selects the
 * source: cleared when the new parent is "pll_clk", set for any other
 * parent.  The register is only written after clk_reparent()
 * succeeds, and the frequency table is rebuilt for the new parent.
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div4_table *d4t = clk->priv;
	struct clk_div_mult_table *table = d4t->div_mult_table;
	u32 value;
	int ret;

	if (!strcmp("pll_clk", parent->name))
		value = __raw_readl(clk->enable_reg) & ~(1 << 7);
	else
		value = __raw_readl(clk->enable_reg) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	__raw_writel(value, clk->enable_reg);

	/* Rebuild the frequency table, the parent rate has changed */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}
188 | ||
189 | static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id) | |
190 | { | |
191 | unsigned long value; | |
192 | int idx = clk_rate_table_find(clk, clk->freq_table, rate); | |
193 | if (idx < 0) | |
194 | return idx; | |
195 | ||
196 | value = __raw_readl(clk->enable_reg); | |
de7ca214 MD |
197 | value &= ~(0xf << clk->enable_bit); |
198 | value |= (idx << clk->enable_bit); | |
31c3af50 GL |
199 | __raw_writel(value, clk->enable_reg); |
200 | ||
201 | return 0; | |
202 | } | |
203 | ||
204 | static int sh_clk_div4_enable(struct clk *clk) | |
205 | { | |
206 | __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg); | |
207 | return 0; | |
208 | } | |
209 | ||
210 | static void sh_clk_div4_disable(struct clk *clk) | |
211 | { | |
212 | __raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg); | |
213 | } | |
214 | ||
a1153e27 MD |
/* Basic DIV4 clocks: rate control only, no gating. */
static struct clk_ops sh_clk_div4_clk_ops = {
	.recalc = sh_clk_div4_recalc,
	.set_rate = sh_clk_div4_set_rate,
	.round_rate = sh_clk_div_round_rate,
};
220 | ||
31c3af50 GL |
/* DIV4 clocks that can additionally be gated via the stop bit. */
static struct clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc = sh_clk_div4_recalc,
	.set_rate = sh_clk_div4_set_rate,
	.round_rate = sh_clk_div_round_rate,
	.enable = sh_clk_div4_enable,
	.disable = sh_clk_div4_disable,
};
228 | ||
/* Gated DIV4 clocks that also support switching parents. */
static struct clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc = sh_clk_div4_recalc,
	.set_rate = sh_clk_div4_set_rate,
	.round_rate = sh_clk_div_round_rate,
	.enable = sh_clk_div4_enable,
	.disable = sh_clk_div4_disable,
	.set_parent = sh_clk_div4_set_parent,
};
237 | ||
/*
 * Common registration helper for the DIV4 variants.  One kzalloc()
 * provides a zeroed (nr_divs + 1)-entry cpufreq frequency table per
 * clock (the extra slot holds the CPUFREQ_TABLE_END terminator).
 * Each clock gets @ops and @table attached before clk_register();
 * the loop stops at the first failure and returns its error code.
 */
static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
	struct clk_div4_table *table, struct clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);

	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div4_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->id = -1;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
	}

	return ret;
}
270 | ||
31c3af50 | 271 | int __init sh_clk_div4_register(struct clk *clks, int nr, |
0a5f337e | 272 | struct clk_div4_table *table) |
31c3af50 GL |
273 | { |
274 | return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops); | |
275 | } | |
276 | ||
277 | int __init sh_clk_div4_enable_register(struct clk *clks, int nr, | |
0a5f337e | 278 | struct clk_div4_table *table) |
31c3af50 GL |
279 | { |
280 | return sh_clk_div4_register_ops(clks, nr, table, | |
281 | &sh_clk_div4_enable_clk_ops); | |
282 | } | |
283 | ||
284 | int __init sh_clk_div4_reparent_register(struct clk *clks, int nr, | |
0a5f337e | 285 | struct clk_div4_table *table) |
31c3af50 GL |
286 | { |
287 | return sh_clk_div4_register_ops(clks, nr, table, | |
288 | &sh_clk_div4_reparent_clk_ops); | |
289 | } | |
290 | ||
36aa1e32 | 291 | #ifdef CONFIG_SH_CLK_CPG_LEGACY |
253b0887 PM |
/*
 * Legacy CPG: the canonical on-chip clocks.  Their ops are supplied
 * later by arch_init_clk_ops() in cpg_clk_init(); the master clock's
 * rate comes from the board's Kconfig (CONFIG_SH_PCLK_FREQ).
 */
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ENABLE_ON_INIT,
	.rate		= CONFIG_SH_PCLK_FREQ,
};

static struct clk peripheral_clk = {
	.name		= "peripheral_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};
315 | ||
/*
 * The ordering of these clocks matters, do not change it: the array
 * index is passed as-is to arch_init_clk_ops() in cpg_clk_init().
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&peripheral_clk,
	&bus_clk,
	&cpu_clk,
};
325 | ||
/*
 * Legacy CPG initialization: ask the machine code for clk_ops for
 * each canonical clock (by array index) and register those that were
 * given ops.  Error codes from clk_register() are OR-ed together, so
 * a non-zero return means at least one registration failed.
 */
int __init __deprecated cpg_clk_init(void)
{
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];
		arch_init_clk_ops(&clk->ops, i);
		if (clk->ops)
			ret |= clk_register(clk);
	}

	return ret;
}
339 | ||
/*
 * Placeholder for compatibility, until the lazy CPUs do this
 * on their own.  Weak so a CPU can override it with its own
 * clock initialization.
 */
int __init __weak arch_clk_init(void)
{
	return cpg_clk_init();
}
#endif /* CONFIG_SH_CLK_CPG_LEGACY */