63819cb1 LP |
1 | /* |
2 | * Versatile Express Serial Power Controller (SPC) support | |
3 | * | |
4 | * Copyright (C) 2013 ARM Ltd. | |
5 | * | |
6 | * Authors: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com> | |
7 | * Achin Gupta <achin.gupta@arm.com> | |
8 | * Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> | |
9 | * | |
10 | * This program is free software; you can redistribute it and/or modify | |
11 | * it under the terms of the GNU General Public License version 2 as | |
12 | * published by the Free Software Foundation. | |
13 | * | |
14 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | |
15 | * kind, whether express or implied; without even the implied warranty | |
16 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
17 | * GNU General Public License for more details. | |
18 | */ | |
19 | ||
4d910d5b SK |
20 | #include <linux/clk-provider.h> |
21 | #include <linux/clkdev.h> | |
22 | #include <linux/cpu.h> | |
f7cd2d83 | 23 | #include <linux/delay.h> |
63819cb1 | 24 | #include <linux/err.h> |
f7cd2d83 | 25 | #include <linux/interrupt.h> |
63819cb1 | 26 | #include <linux/io.h> |
9e941b6f | 27 | #include <linux/platform_device.h> |
f7cd2d83 | 28 | #include <linux/pm_opp.h> |
63819cb1 | 29 | #include <linux/slab.h> |
f7cd2d83 | 30 | #include <linux/semaphore.h> |
63819cb1 LP |
31 | |
32 | #include <asm/cacheflush.h> | |
33 | ||
34 | #define SPCLOG "vexpress-spc: " | |
35 | ||
f7cd2d83 SK |
36 | #define PERF_LVL_A15 0x00 |
37 | #define PERF_REQ_A15 0x04 | |
38 | #define PERF_LVL_A7 0x08 | |
39 | #define PERF_REQ_A7 0x0c | |
40 | #define COMMS 0x10 | |
41 | #define COMMS_REQ 0x14 | |
42 | #define PWC_STATUS 0x18 | |
43 | #define PWC_FLAG 0x1c | |
44 | ||
63819cb1 LP |
45 | /* SPC wake-up IRQs status and mask */ |
46 | #define WAKE_INT_MASK 0x24 | |
47 | #define WAKE_INT_RAW 0x28 | |
48 | #define WAKE_INT_STAT 0x2c | |
49 | /* SPC power down registers */ | |
50 | #define A15_PWRDN_EN 0x30 | |
51 | #define A7_PWRDN_EN 0x34 | |
52 | /* SPC per-CPU mailboxes */ | |
53 | #define A15_BX_ADDR0 0x68 | |
54 | #define A7_BX_ADDR0 0x78 | |
55 | ||
33cb667a DM |
56 | /* SPC CPU/cluster reset status */ | |
57 | #define STANDBYWFI_STAT 0x3c | |
58 | #define STANDBYWFI_STAT_A15_CPU_MASK(cpu) (1 << (cpu)) | |
59 | #define STANDBYWFI_STAT_A7_CPU_MASK(cpu) (1 << (3 + (cpu))) | |
60 | ||
f7cd2d83 SK |
61 | /* SPC system config interface registers */ |
62 | #define SYSCFG_WDATA 0x70 | |
63 | #define SYSCFG_RDATA 0x74 | |
64 | ||
65 | /* A15/A7 OPP virtual register base */ | |
66 | #define A15_PERFVAL_BASE 0xC10 | |
67 | #define A7_PERFVAL_BASE 0xC30 | |
68 | ||
69 | /* Config interface control bits */ | |
70 | #define SYSCFG_START (1 << 31) | |
71 | #define SYSCFG_SCC (6 << 20) | |
72 | #define SYSCFG_STAT (14 << 20) | |
73 | ||
63819cb1 LP |
74 | /* wake-up interrupt masks */ |
75 | #define GBL_WAKEUP_INT_MSK (0x3 << 10) | |
76 | ||
77 | /* TC2 static dual-cluster configuration */ | |
78 | #define MAX_CLUSTERS 2 | |
79 | ||
f7cd2d83 SK |
80 | /* |
81 | * Even though the SPC takes at most 3-5 ms to complete any OPP/COMMS | |
82 | * operation, the operation could start just before the jiffies counter | |
83 | * is incremented, so use a 20 ms timeout (i.e. 2 jiffies at HZ=100). | |
84 | */ | |
85 | #define TIMEOUT_US 20000 | |
86 | ||
87 | #define MAX_OPPS 8 | |
88 | #define CA15_DVFS 0 | |
89 | #define CA7_DVFS 1 | |
90 | #define SPC_SYS_CFG 2 | |
91 | #define STAT_COMPLETE(type) ((1 << 0) << (type << 2)) | |
92 | #define STAT_ERR(type) ((1 << 1) << (type << 2)) | |
93 | #define RESPONSE_MASK(type) (STAT_COMPLETE(type) | STAT_ERR(type)) | |
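
/*
 * Illustrative expansion of the encoding above (added note, not in the
 * original source): each request type owns a 4-bit field in PWC_STATUS,
 * with bit 0 of the field meaning "complete" and bit 1 meaning "error":
 *
 *   STAT_COMPLETE(CA15_DVFS)   == 0x001    STAT_ERR(CA15_DVFS)   == 0x002
 *   STAT_COMPLETE(CA7_DVFS)    == 0x010    STAT_ERR(CA7_DVFS)    == 0x020
 *   STAT_COMPLETE(SPC_SYS_CFG) == 0x100    STAT_ERR(SPC_SYS_CFG) == 0x200
 */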
94 | ||
95 | struct ve_spc_opp { | |
96 | unsigned long freq; | |
97 | unsigned long u_volt; | |
98 | }; | |
99 | ||
63819cb1 LP |
100 | struct ve_spc_drvdata { |
101 | void __iomem *baseaddr; | |
102 | /* | |
103 | * A15s cluster identifier | |
104 | * It corresponds to A15 processors MPIDR[15:8] bitfield | |
105 | */ | |
106 | u32 a15_clusid; | |
f7cd2d83 SK |
107 | uint32_t cur_rsp_mask; |
108 | uint32_t cur_rsp_stat; | |
109 | struct semaphore sem; | |
110 | struct completion done; | |
111 | struct ve_spc_opp *opps[MAX_CLUSTERS]; | |
112 | int num_opps[MAX_CLUSTERS]; | |
63819cb1 LP |
113 | }; |
114 | ||
115 | static struct ve_spc_drvdata *info; | |
116 | ||
117 | static inline bool cluster_is_a15(u32 cluster) | |
118 | { | |
119 | return cluster == info->a15_clusid; | |
120 | } | |
121 | ||
122 | /** | |
123 | * ve_spc_global_wakeup_irq() - set/clear global wake-up IRQs | |
124 | * | |
125 | * Not protected by locking, since it may be called on code paths where | |
126 | * normal cacheable locks cannot be used (e.g. while the caches are | |
127 | * disabled); the caller must provide any locking needed for atomicity. | |
128 | * | |
129 | * @set: if true, global wake-up IRQs are set, if false they are cleared | |
130 | */ | |
131 | void ve_spc_global_wakeup_irq(bool set) | |
132 | { | |
133 | u32 reg; | |
134 | ||
135 | reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK); | |
136 | ||
137 | if (set) | |
138 | reg |= GBL_WAKEUP_INT_MSK; | |
139 | else | |
140 | reg &= ~GBL_WAKEUP_INT_MSK; | |
141 | ||
142 | writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK); | |
143 | } | |
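
/*
 * Usage sketch (illustrative, not part of this file): a cluster
 * power-down path would typically set the global wake-up IRQs before
 * entering the low-power state and clear them again on power-up, with
 * the caller providing any locking:
 *
 *   ve_spc_global_wakeup_irq(true);    // set global wake-up IRQs
 *   // ... power down / wait for wake-up ...
 *   ve_spc_global_wakeup_irq(false);   // clear them on power-up
 */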
144 | ||
145 | /** | |
146 | * ve_spc_cpu_wakeup_irq() - set/clear per-CPU wake-up IRQs | |
147 | * | |
148 | * Not protected by locking, since it may be called on code paths where | |
149 | * normal cacheable locks cannot be used (e.g. while the caches are | |
150 | * disabled); the caller must provide any locking needed for atomicity. | |
151 | * | |
152 | * @cluster: mpidr[15:8] bitfield describing cluster affinity level | |
153 | * @cpu: mpidr[7:0] bitfield describing cpu affinity level | |
154 | * @set: if true, wake-up IRQs are set, if false they are cleared | |
155 | */ | |
156 | void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set) | |
157 | { | |
158 | u32 mask, reg; | |
159 | ||
160 | if (cluster >= MAX_CLUSTERS) | |
161 | return; | |
162 | ||
163 | mask = 1 << cpu; | |
164 | ||
165 | if (!cluster_is_a15(cluster)) | |
166 | mask <<= 4; | |
167 | ||
168 | reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK); | |
169 | ||
170 | if (set) | |
171 | reg |= mask; | |
172 | else | |
173 | reg &= ~mask; | |
174 | ||
175 | writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK); | |
176 | } | |
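
/*
 * Worked example (illustrative): with the layout above, A15 CPUs map to
 * WAKE_INT_MASK bit [cpu] and A7 CPUs to bit [cpu + 4], so for a
 * two-cluster TC2-like setup:
 *
 *   ve_spc_cpu_wakeup_irq(info->a15_clusid, 1, true);   // sets bit 1
 *   ve_spc_cpu_wakeup_irq(a7_clusid, 1, true);          // sets bit 5
 *
 * (a7_clusid here stands for whichever cluster id is not the A15 one.)
 */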
177 | ||
178 | /** | |
179 | * ve_spc_set_resume_addr() - set the jump address used for warm boot | |
180 | * | |
181 | * @cluster: mpidr[15:8] bitfield describing cluster affinity level | |
182 | * @cpu: mpidr[7:0] bitfield describing cpu affinity level | |
183 | * @addr: physical resume address | |
184 | */ | |
185 | void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr) | |
186 | { | |
187 | void __iomem *baseaddr; | |
188 | ||
189 | if (cluster >= MAX_CLUSTERS) | |
190 | return; | |
191 | ||
192 | if (cluster_is_a15(cluster)) | |
193 | baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2); | |
194 | else | |
195 | baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2); | |
196 | ||
197 | writel_relaxed(addr, baseaddr); | |
198 | } | |
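
/*
 * Usage sketch (illustrative): a power-management backend points the
 * per-CPU mailbox at its physical warm-boot entry before the CPU is
 * powered down, e.g. (entry symbol assumed, as an MCPM backend would
 * use):
 *
 *   ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
 */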
199 | ||
200 | /** | |
201 | * ve_spc_powerdown() - enable/disable cluster powerdown | |
202 | * | |
203 | * Not protected by locking, since it may be called on code paths where | |
204 | * normal cacheable locks cannot be used (e.g. while the caches are | |
205 | * disabled); the caller must provide any locking needed for atomicity. | |
206 | * | |
207 | * @cluster: mpidr[15:8] bitfield describing cluster affinity level | |
208 | * @enable: if true enables powerdown, if false disables it | |
209 | */ | |
210 | void ve_spc_powerdown(u32 cluster, bool enable) | |
211 | { | |
212 | u32 pwdrn_reg; | |
213 | ||
214 | if (cluster >= MAX_CLUSTERS) | |
215 | return; | |
216 | ||
217 | pwdrn_reg = cluster_is_a15(cluster) ? A15_PWRDN_EN : A7_PWRDN_EN; | |
218 | writel_relaxed(enable, info->baseaddr + pwdrn_reg); | |
219 | } | |
220 | ||
33cb667a DM |
221 | static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster) |
222 | { | |
223 | return cluster_is_a15(cluster) ? | |
224 | STANDBYWFI_STAT_A15_CPU_MASK(cpu) | |
225 | : STANDBYWFI_STAT_A7_CPU_MASK(cpu); | |
226 | } | |
227 | ||
228 | /** | |
229 | * ve_spc_cpu_in_wfi() - check whether the specified CPU is in WFI | |
230 | * | |
231 | * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster | |
232 | * @cluster: mpidr[15:8] bitfield describing cluster affinity level | |
233 | * | |
234 | * @return: non-zero if and only if the specified CPU is in WFI | |
235 | * | |
236 | * Take care when interpreting the result of this function: a CPU might | |
237 | * be in WFI temporarily due to idle, and is not necessarily safely | |
238 | * parked. | |
239 | */ | |
240 | int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster) | |
241 | { | |
242 | int ret; | |
243 | u32 mask = standbywfi_cpu_mask(cpu, cluster); | |
244 | ||
245 | if (cluster >= MAX_CLUSTERS) | |
246 | return 1; | |
247 | ||
248 | ret = readl_relaxed(info->baseaddr + STANDBYWFI_STAT); | |
249 | ||
250 | pr_debug("%s: PCFGREG[0x%X] = 0x%08X, mask = 0x%X\n", | |
251 | __func__, STANDBYWFI_STAT, ret, mask); | |
252 | ||
253 | return ret & mask; | |
254 | } | |
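
/*
 * Usage sketch (illustrative): subject to the caveat above, a kill/wait
 * path can poll until the dying CPU is observed in WFI before cutting
 * power:
 *
 *   while (!ve_spc_cpu_in_wfi(cpu, cluster))
 *           cpu_relax();
 */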
255 | ||
f7cd2d83 SK |
256 | static int ve_spc_get_performance(int cluster, u32 *freq) |
257 | { | |
258 | struct ve_spc_opp *opps = info->opps[cluster]; | |
259 | u32 perf_cfg_reg = 0; | |
260 | u32 perf; | |
261 | ||
262 | perf_cfg_reg = cluster_is_a15(cluster) ? PERF_LVL_A15 : PERF_LVL_A7; | |
263 | ||
264 | perf = readl_relaxed(info->baseaddr + perf_cfg_reg); | |
265 | if (perf >= info->num_opps[cluster]) | |
266 | return -EINVAL; | |
267 | ||
268 | opps += perf; | |
269 | *freq = opps->freq; | |
270 | ||
271 | return 0; | |
272 | } | |
273 | ||
274 | /* find closest match to given frequency in OPP table */ | |
275 | static int ve_spc_round_performance(int cluster, u32 freq) | |
276 | { | |
277 | int idx, max_opp = info->num_opps[cluster]; | |
278 | struct ve_spc_opp *opps = info->opps[cluster]; | |
279 | u32 fmin = 0, fmax = ~0, ftmp; | |
280 | ||
281 | freq /= 1000; /* OPP entries in kHz */ | |
282 | for (idx = 0; idx < max_opp; idx++, opps++) { | |
283 | ftmp = opps->freq; | |
284 | if (ftmp >= freq) { | |
285 | if (ftmp <= fmax) | |
286 | fmax = ftmp; | |
287 | } else { | |
288 | if (ftmp >= fmin) | |
289 | fmin = ftmp; | |
290 | } | |
291 | } | |
292 | if (fmax != ~0) | |
293 | return fmax * 1000; | |
294 | else | |
295 | return fmin * 1000; | |
296 | } | |
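
/*
 * Worked example (illustrative): with OPP entries of 800000, 900000 and
 * 1000000 kHz, a request for 850000000 Hz becomes 850000 kHz; the
 * smallest entry at or above it is 900000, so 900000000 (Hz) is
 * returned. A request above the largest entry leaves fmax untouched,
 * and fmin (the largest entry below the request) is returned instead.
 */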
297 | ||
298 | static int ve_spc_find_performance_index(int cluster, u32 freq) | |
299 | { | |
300 | int idx, max_opp = info->num_opps[cluster]; | |
301 | struct ve_spc_opp *opps = info->opps[cluster]; | |
302 | ||
303 | for (idx = 0; idx < max_opp; idx++, opps++) | |
304 | if (opps->freq == freq) | |
305 | break; | |
306 | return (idx == max_opp) ? -EINVAL : idx; | |
307 | } | |
308 | ||
309 | static int ve_spc_waitforcompletion(int req_type) | |
310 | { | |
311 | int ret = wait_for_completion_interruptible_timeout( | |
312 | &info->done, usecs_to_jiffies(TIMEOUT_US)); | |
313 | if (ret == 0) | |
314 | ret = -ETIMEDOUT; | |
315 | else if (ret > 0) | |
316 | ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO; | |
317 | return ret; | |
318 | } | |
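
/*
 * Note (added for clarity): wait_for_completion_interruptible_timeout()
 * returns 0 on timeout (mapped to -ETIMEDOUT here), a negative value
 * such as -ERESTARTSYS if interrupted (passed straight through), or the
 * remaining jiffies on completion, in which case the status word saved
 * by the IRQ handler decides between success and -EIO.
 */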
319 | ||
320 | static int ve_spc_set_performance(int cluster, u32 freq) | |
321 | { | |
322 | u32 perf_cfg_reg, perf_stat_reg; | |
323 | int ret, perf, req_type; | |
324 | ||
325 | if (cluster_is_a15(cluster)) { | |
326 | req_type = CA15_DVFS; | |
327 | perf_cfg_reg = PERF_LVL_A15; | |
328 | perf_stat_reg = PERF_REQ_A15; | |
329 | } else { | |
330 | req_type = CA7_DVFS; | |
331 | perf_cfg_reg = PERF_LVL_A7; | |
332 | perf_stat_reg = PERF_REQ_A7; | |
333 | } | |
334 | ||
335 | perf = ve_spc_find_performance_index(cluster, freq); | |
336 | ||
337 | if (perf < 0) | |
338 | return perf; | |
339 | ||
340 | if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US))) | |
341 | return -ETIME; | |
342 | ||
343 | init_completion(&info->done); | |
344 | info->cur_rsp_mask = RESPONSE_MASK(req_type); | |
345 | ||
346 | writel(perf, info->baseaddr + perf_cfg_reg); | |
347 | ret = ve_spc_waitforcompletion(req_type); | |
348 | ||
349 | info->cur_rsp_mask = 0; | |
350 | up(&info->sem); | |
351 | ||
352 | return ret; | |
353 | } | |
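
/*
 * Usage sketch (illustrative): callers pass the target frequency in
 * kHz, and only exact OPP-table frequencies are accepted (see
 * ve_spc_find_performance_index() above). E.g. to request 900 MHz,
 * assuming the cluster id is in "cluster":
 *
 *   ret = ve_spc_set_performance(cluster, 900000);
 */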
354 | ||
355 | static int ve_spc_read_sys_cfg(int func, int offset, uint32_t *data) | |
356 | { | |
357 | int ret; | |
358 | ||
359 | if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US))) | |
360 | return -ETIME; | |
361 | ||
362 | init_completion(&info->done); | |
363 | info->cur_rsp_mask = RESPONSE_MASK(SPC_SYS_CFG); | |
364 | ||
365 | /* Set the control value */ | |
366 | writel(SYSCFG_START | func | offset >> 2, info->baseaddr + COMMS); | |
367 | ret = ve_spc_waitforcompletion(SPC_SYS_CFG); | |
368 | ||
369 | if (ret == 0) | |
370 | *data = readl(info->baseaddr + SYSCFG_RDATA); | |
371 | ||
372 | info->cur_rsp_mask = 0; | |
373 | up(&info->sem); | |
374 | ||
375 | return ret; | |
376 | } | |
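
/*
 * Usage sketch (illustrative): ve_spc_populate_opps() below uses this
 * interface to read the SCC per-OPP registers, e.g.:
 *
 *   uint32_t data;
 *   int ret = ve_spc_read_sys_cfg(SYSCFG_SCC, A15_PERFVAL_BASE, &data);
 */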
377 | ||
378 | static irqreturn_t ve_spc_irq_handler(int irq, void *data) | |
379 | { | |
380 | struct ve_spc_drvdata *drv_data = data; | |
381 | uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS); | |
382 | ||
383 | if (drv_data->cur_rsp_mask & status) { | |
384 | drv_data->cur_rsp_stat = status; | |
385 | complete(&drv_data->done); | |
386 | } | |
387 | ||
388 | return IRQ_HANDLED; | |
389 | } | |
390 | ||
391 | /* | |
392 | * +--------------------------+ | |
393 | * | 31      20 | 19        0 | | |
394 | * +--------------------------+ | |
cf2e0a73 | 395 | * |   m_volt   |  freq(kHz)  |
f7cd2d83 SK |
396 | * +--------------------------+ |
397 | */ | |
398 | #define MULT_FACTOR 20 | |
399 | #define VOLT_SHIFT 20 | |
400 | #define FREQ_MASK (0xFFFFF) | |
401 | static int ve_spc_populate_opps(uint32_t cluster) | |
402 | { | |
403 | int ret, idx; uint32_t data = 0, off; | |
404 | struct ve_spc_opp *opps; | |
405 | ||
406 | opps = kzalloc(sizeof(*opps) * MAX_OPPS, GFP_KERNEL); | |
407 | if (!opps) | |
408 | return -ENOMEM; | |
409 | ||
410 | info->opps[cluster] = opps; | |
411 | ||
412 | off = cluster_is_a15(cluster) ? A15_PERFVAL_BASE : A7_PERFVAL_BASE; | |
413 | for (idx = 0; idx < MAX_OPPS; idx++, off += 4, opps++) { | |
414 | ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data); | |
415 | if (!ret) { | |
416 | opps->freq = (data & FREQ_MASK) * MULT_FACTOR; | |
cf2e0a73 | 417 | opps->u_volt = (data >> VOLT_SHIFT) * 1000; |
f7cd2d83 SK |
418 | } else { |
419 | break; | |
420 | } | |
421 | } | |
422 | info->num_opps[cluster] = idx; | |
423 | ||
424 | return ret; | |
425 | } | |
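
/*
 * Worked decode example (illustrative): a raw perfval word of
 * 0x3E80C350 splits into m_volt = 0x3E8 (1000 mV, stored as
 * 1000 * 1000 = 1000000 uV) and a frequency field of 0xC350 (50000,
 * stored as 50000 * 20 = 1000000 kHz, i.e. 1 GHz).
 */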
426 | ||
427 | static int ve_init_opp_table(struct device *cpu_dev) | |
428 | { | |
e160cc17 AS |
429 | int cluster; |
430 | int idx, ret = 0, max_opp; | |
431 | struct ve_spc_opp *opps; | |
432 | ||
433 | cluster = topology_physical_package_id(cpu_dev->id); | |
434 | cluster = cluster < 0 ? 0 : cluster; | |
435 | ||
436 | max_opp = info->num_opps[cluster]; | |
437 | opps = info->opps[cluster]; | |
f7cd2d83 SK |
438 | |
439 | for (idx = 0; idx < max_opp; idx++, opps++) { | |
440 | ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt); | |
441 | if (ret) { | |
442 | dev_warn(cpu_dev, "failed to add opp %lu %lu\n", | |
443 | opps->freq, opps->u_volt); | |
444 | return ret; | |
445 | } | |
446 | } | |
447 | return ret; | |
448 | } | |
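
/*
 * Note (added for clarity): dev_pm_opp_add() takes the frequency in Hz
 * and the voltage in uV; opps->freq is stored in kHz (hence the * 1000)
 * while opps->u_volt is already in uV (see ve_spc_populate_opps()).
 */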
449 | ||
450 | int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq) | |
63819cb1 | 451 | { |
f7cd2d83 | 452 | int ret; |
63819cb1 LP |
453 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
454 | if (!info) { | |
455 | pr_err(SPCLOG "unable to allocate mem\n"); | |
456 | return -ENOMEM; | |
457 | } | |
458 | ||
459 | info->baseaddr = baseaddr; | |
460 | info->a15_clusid = a15_clusid; | |
461 | ||
f7cd2d83 SK |
462 | if (irq <= 0) { |
463 | pr_err(SPCLOG "Invalid IRQ %d\n", irq); | |
464 | kfree(info); | |
465 | return -EINVAL; | |
466 | } | |
467 | ||
468 | init_completion(&info->done); | |
469 | ||
470 | readl_relaxed(info->baseaddr + PWC_STATUS); | |
471 | ||
472 | ret = request_irq(irq, ve_spc_irq_handler, IRQF_TRIGGER_HIGH | |
473 | | IRQF_ONESHOT, "vexpress-spc", info); | |
474 | if (ret) { | |
475 | pr_err(SPCLOG "IRQ %d request failed\n", irq); | |
476 | kfree(info); | |
477 | return -ENODEV; | |
478 | } | |
479 | ||
480 | sema_init(&info->sem, 1); | |
63819cb1 LP |
481 | /* |
482 | * Multi-cluster systems may need this data when non-coherent, during | |
483 | * cluster power-up/power-down. Make sure driver info reaches main | |
484 | * memory. | |
485 | */ | |
486 | sync_cache_w(info); | |
487 | sync_cache_w(&info); | |
488 | ||
489 | return 0; | |
490 | } | |
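
/*
 * Usage sketch (illustrative, names assumed): platform init code maps
 * the SPC and hands it over together with the A15 cluster id and the
 * SPC IRQ, e.g.:
 *
 *   ret = ve_spc_init(ioremap(spc_phys_base, SZ_4K), a15_clusid,
 *                     irq_of_parse_and_map(spc_node, 0));
 */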
4d910d5b SK |
491 | |
492 | struct clk_spc { | |
493 | struct clk_hw hw; | |
494 | int cluster; | |
495 | }; | |
496 | ||
497 | #define to_clk_spc(spc) container_of(spc, struct clk_spc, hw) | |
498 | static unsigned long spc_recalc_rate(struct clk_hw *hw, | |
499 | unsigned long parent_rate) | |
500 | { | |
501 | struct clk_spc *spc = to_clk_spc(hw); | |
502 | u32 freq; | |
503 | ||
504 | if (ve_spc_get_performance(spc->cluster, &freq)) | |
505 | return -EIO; | |
506 | ||
507 | return freq * 1000; | |
508 | } | |
509 | ||
510 | static long spc_round_rate(struct clk_hw *hw, unsigned long drate, | |
511 | unsigned long *parent_rate) | |
512 | { | |
513 | struct clk_spc *spc = to_clk_spc(hw); | |
514 | ||
515 | return ve_spc_round_performance(spc->cluster, drate); | |
516 | } | |
517 | ||
518 | static int spc_set_rate(struct clk_hw *hw, unsigned long rate, | |
519 | unsigned long parent_rate) | |
520 | { | |
521 | struct clk_spc *spc = to_clk_spc(hw); | |
522 | ||
523 | return ve_spc_set_performance(spc->cluster, rate / 1000); | |
524 | } | |
525 | ||
526 | static struct clk_ops clk_spc_ops = { | |
527 | .recalc_rate = spc_recalc_rate, | |
528 | .round_rate = spc_round_rate, | |
529 | .set_rate = spc_set_rate, | |
530 | }; | |
531 | ||
532 | static struct clk *ve_spc_clk_register(struct device *cpu_dev) | |
533 | { | |
534 | struct clk_init_data init; | |
535 | struct clk_spc *spc; | |
536 | ||
537 | spc = kzalloc(sizeof(*spc), GFP_KERNEL); | |
538 | if (!spc) { | |
539 | pr_err("could not allocate spc clk\n"); | |
540 | return ERR_PTR(-ENOMEM); | |
541 | } | |
542 | ||
543 | spc->hw.init = &init; | |
544 | spc->cluster = topology_physical_package_id(cpu_dev->id); | |
545 | ||
e160cc17 AS |
546 | spc->cluster = spc->cluster < 0 ? 0 : spc->cluster; |
547 | ||
4d910d5b SK |
548 | init.name = dev_name(cpu_dev); |
549 | init.ops = &clk_spc_ops; | |
550 | init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE; | |
551 | init.num_parents = 0; | |
552 | ||
553 | return devm_clk_register(cpu_dev, &spc->hw); | |
554 | } | |
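
/*
 * Usage sketch (illustrative): once registered with a clkdev lookup (as
 * done in ve_spc_clk_init() below), the clock is consumed through the
 * regular clk API, e.g. by a cpufreq driver:
 *
 *   struct clk *clk = clk_get(cpu_dev, NULL);
 *
 *   if (!IS_ERR(clk))
 *           clk_set_rate(clk, 1000000000);   // 1 GHz, routed to the SPC
 */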
555 | ||
556 | static int __init ve_spc_clk_init(void) | |
557 | { | |
558 | int cpu; | |
559 | struct clk *clk; | |
560 | ||
561 | if (!info) | |
562 | return 0; /* Continue only if SPC is initialised */ | |
563 | ||
564 | if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) { | |
565 | pr_err("failed to build OPP table\n"); | |
566 | return -ENODEV; | |
567 | } | |
568 | ||
569 | for_each_possible_cpu(cpu) { | |
570 | struct device *cpu_dev = get_cpu_device(cpu); | |
571 | if (!cpu_dev) { | |
572 | pr_warn("failed to get cpu%d device\n", cpu); | |
573 | continue; | |
574 | } | |
575 | clk = ve_spc_clk_register(cpu_dev); | |
576 | if (IS_ERR(clk)) { | |
577 | pr_warn("failed to register cpu%d clock\n", cpu); | |
578 | continue; | |
579 | } | |
580 | if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) { | |
581 | pr_warn("failed to register cpu%d clock lookup\n", cpu); | |
582 | continue; | |
583 | } | |
584 | ||
585 | if (ve_init_opp_table(cpu_dev)) | |
586 | pr_warn("failed to initialise cpu%d opp table\n", cpu); | |
587 | } | |
588 | ||
9e941b6f | 589 | platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0); |
4d910d5b SK |
590 | return 0; |
591 | } | |
4a0ece7c | 592 | device_initcall(ve_spc_clk_init); |