Commit | Line | Data |
---|---|---|
a112de8c MD |
1 | /* |
2 | * SMP support for SoCs with APMU | |
3 | * | |
4 | * Copyright (C) 2013 Magnus Damm | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | */ | |
10 | #include <linux/delay.h> | |
11 | #include <linux/init.h> | |
12 | #include <linux/io.h> | |
13 | #include <linux/ioport.h> | |
14 | #include <linux/of_address.h> | |
15 | #include <linux/smp.h> | |
16 | #include <asm/cacheflush.h> | |
17 | #include <asm/cp15.h> | |
18 | #include <asm/smp_plat.h> | |
19 | #include <mach/common.h> | |
20 | ||
/*
 * Per-logical-cpu view of the APMU: the mapped register window and the
 * bit position that selects this cpu inside WUPCR / PSTR / CPUnCR.
 * Entries with iomem == NULL are unused (see apmu_wrap()).
 */
static struct {
	void __iomem *iomem;	/* mapped APMU register block, NULL if not set up */
	int bit;		/* this cpu's bit index within the APMU */
} apmu_cpus[CONFIG_NR_CPUS];
25 | ||
#define WUPCR_OFFS 0x10		/* wake-up control: write BIT(n) to power on core n */
#define PSTR_OFFS 0x40		/* power status: 4-bit state field per core */
#define CPUNCR_OFFS(n) (0x100 + (0x10 * (n)))	/* per-core CPUnCR control register */
29 | ||
30 | static int apmu_power_on(void __iomem *p, int bit) | |
31 | { | |
32 | /* request power on */ | |
33 | writel_relaxed(BIT(bit), p + WUPCR_OFFS); | |
34 | ||
35 | /* wait for APMU to finish */ | |
36 | while (readl_relaxed(p + WUPCR_OFFS) != 0) | |
37 | ; | |
38 | ||
39 | return 0; | |
40 | } | |
41 | ||
42 | static int apmu_power_off(void __iomem *p, int bit) | |
43 | { | |
44 | /* request Core Standby for next WFI */ | |
45 | writel_relaxed(3, p + CPUNCR_OFFS(bit)); | |
46 | return 0; | |
47 | } | |
48 | ||
49 | static int apmu_power_off_poll(void __iomem *p, int bit) | |
50 | { | |
51 | int k; | |
52 | ||
53 | for (k = 0; k < 1000; k++) { | |
54 | if (((readl_relaxed(p + PSTR_OFFS) >> (bit * 4)) & 0x03) == 3) | |
55 | return 1; | |
56 | ||
57 | mdelay(1); | |
58 | } | |
59 | ||
60 | return 0; | |
61 | } | |
62 | ||
63 | static int apmu_wrap(int cpu, int (*fn)(void __iomem *p, int cpu)) | |
64 | { | |
65 | void __iomem *p = apmu_cpus[cpu].iomem; | |
66 | ||
67 | return p ? fn(p, apmu_cpus[cpu].bit) : -EINVAL; | |
68 | } | |
69 | ||
70 | static void apmu_init_cpu(struct resource *res, int cpu, int bit) | |
71 | { | |
72 | if (apmu_cpus[cpu].iomem) | |
73 | return; | |
74 | ||
75 | apmu_cpus[cpu].iomem = ioremap_nocache(res->start, resource_size(res)); | |
76 | apmu_cpus[cpu].bit = bit; | |
77 | ||
78 | pr_debug("apmu ioremap %d %d 0x%08x 0x%08x\n", cpu, bit, | |
79 | res->start, resource_size(res)); | |
80 | } | |
81 | ||
/*
 * Static board description: one entry per APMU instance.  The position
 * of an id inside .cpus[] is the APMU bit that drives that cpu (see
 * apmu_parse_cfg(), which pairs each entry with its array index).
 */
static struct {
	struct resource iomem;	/* APMU register window */
	int cpus[4];		/* hardware cpu id per APMU bit position */
} apmu_config[] = {
	{
		/* single APMU at 0xe6152000 driving cpu ids 0..3 */
		.iomem = DEFINE_RES_MEM(0xe6152000, 0x88),
		.cpus = { 0, 1, 2, 3 },
	}
};
91 | ||
92 | static void apmu_parse_cfg(void (*fn)(struct resource *res, int cpu, int bit)) | |
93 | { | |
94 | u32 id; | |
95 | int k; | |
96 | int bit, index; | |
97 | ||
98 | for (k = 0; k < ARRAY_SIZE(apmu_config); k++) { | |
99 | for (bit = 0; bit < ARRAY_SIZE(apmu_config[k].cpus); bit++) { | |
100 | id = apmu_config[k].cpus[bit]; | |
101 | if (id >= 0) { | |
102 | index = get_logical_index(id); | |
103 | if (index >= 0) | |
104 | fn(&apmu_config[k].iomem, index, bit); | |
105 | } | |
106 | } | |
107 | } | |
108 | } | |
109 | ||
/*
 * SMP prepare hook: publish the shared secondary-boot entry point and
 * map each cpu's APMU register block via apmu_parse_cfg().
 * @max_cpus is unused here; per-cpu filtering happens in the table walk.
 */
void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus)
{
	/* install boot code shared by all CPUs */
	shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
	shmobile_boot_arg = MPIDR_HWID_BITMASK;	/* match any hw cpu id */

	/* perform per-cpu setup: ioremap each cpu's APMU window */
	apmu_parse_cfg(apmu_init_cpu);
}
119 | ||
120 | int shmobile_smp_apmu_boot_secondary(unsigned int cpu, struct task_struct *idle) | |
121 | { | |
122 | /* For this particular CPU register boot vector */ | |
123 | shmobile_smp_hook(cpu, virt_to_phys(shmobile_invalidate_start), 0); | |
124 | ||
125 | return apmu_wrap(cpu, apmu_power_on); | |
126 | } | |
127 | ||
128 | #ifdef CONFIG_HOTPLUG_CPU | |
/* nicked from arch/arm/mach-exynos/hotplug.c */
/*
 * Prepare a Cortex-A15 core for power-down: disable its D-cache,
 * flush dirty lines to the level of unification, then take the core
 * out of SMP coherency.  The statement order is critical — after the
 * flush this core must not allocate new cacheable lines, and after
 * leaving coherency it must not touch memory shared with live cpus.
 */
static inline void cpu_enter_lowpower_a15(void)
{
	unsigned int v;

	/* SCTLR: clear the C bit so no new D-cache lines are allocated */
	asm volatile(
	" mrc p15, 0, %0, c1, c0, 0\n"
	" bic %0, %0, %1\n"
	" mcr p15, 0, %0, c1, c0, 0\n"
	: "=&r" (v)
	: "Ir" (CR_C)
	: "cc");

	/* push everything this core dirtied out to the shared cache level */
	flush_cache_louis();

	asm volatile(
	/*
	 * Turn off coherency
	 */
	" mrc p15, 0, %0, c1, c0, 1\n"
	" bic %0, %0, %1\n"
	" mcr p15, 0, %0, c1, c0, 1\n"
	: "=&r" (v)
	: "Ir" (0x40)	/* NOTE(review): bit 6 of ACTLR — presumably the SMP bit; confirm against the A15 TRM */
	: "cc");

	/* ensure the cp15 writes have taken effect before going further */
	isb();
	dsb();
}
158 | ||
/*
 * Runs on the dying cpu itself.  Order matters: arm Core Standby in
 * the APMU first, then drop cache/coherency state, and only then
 * enter the shared sleep path — presumably shmobile_smp_sleep() does
 * not return (TODO: confirm against mach/common.h).
 */
void shmobile_smp_apmu_cpu_die(unsigned int cpu)
{
	/* For this particular CPU deregister boot vector */
	shmobile_smp_hook(cpu, 0, 0);

	/* Select next sleep mode using the APMU */
	apmu_wrap(cpu, apmu_power_off);

	/* Do ARM specific CPU shutdown */
	cpu_enter_lowpower_a15();

	/* jump to shared mach-shmobile sleep / reset code */
	shmobile_smp_sleep();
}
173 | ||
/*
 * Called from a surviving cpu to confirm the dying cpu powered down.
 * Returns 1 if its PSTR field reached Core Standby, 0 on poll timeout,
 * or -EINVAL if the cpu has no APMU mapping (see apmu_wrap()).
 */
int shmobile_smp_apmu_cpu_kill(unsigned int cpu)
{
	return apmu_wrap(cpu, apmu_power_off_poll);
}
178 | #endif |