/*
 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>

extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

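/*
 * mcpm_set_entry_vector: set the kernel re-entry address for a CPU
 *
 * Records the physical address of @ptr (or 0 to clear the vector) in the
 * slot for the given CPU/cluster, then cleans that location to main memory
 * so a CPU coming out of reset with its cache off will see the new value.
 */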
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
        unsigned long val = ptr ? virt_to_phys(ptr) : 0;
        mcpm_entry_vectors[cluster][cpu] = val;
        sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}

extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];

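/*
 * mcpm_set_early_poke: register an early "poke" for a booting CPU
 *
 * Records a physical address/value pair for the given CPU; the low-level
 * entry code writes @poke_val to @poke_phys_addr on that CPU's behalf very
 * early during its boot, before the kernel proper is entered.  The pair is
 * cleaned to main memory for cache-off visibility.
 */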
void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
                         unsigned long poke_phys_addr, unsigned long poke_val)
{
        unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
        poke[0] = poke_phys_addr;
        poke[1] = poke_val;
        __sync_cache_range_w(poke, 2 * sizeof(*poke));
}

static const struct mcpm_platform_ops *platform_ops;

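/*
 * mcpm_platform_register: register the platform-specific MCPM backend
 *
 * Only one backend may be registered; subsequent calls fail with -EBUSY.
 * A backend would typically be registered from platform init code along
 * these lines (the my_soc_* names below are illustrative only):
 *
 *        static const struct mcpm_platform_ops my_soc_power_ops = {
 *                .power_up       = my_soc_power_up,
 *                .power_down     = my_soc_power_down,
 *        };
 *
 *        ret = mcpm_platform_register(&my_soc_power_ops);
 */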
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
        if (platform_ops)
                return -EBUSY;
        platform_ops = ops;
        return 0;
}

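/*
 * mcpm_cpu_power_up: power up a specific CPU in a specific cluster
 *
 * May sleep.  Returns 0 on success or a negative error code; -EUNATCH
 * means no platform backend has been registered yet.
 */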
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
        if (!platform_ops)
                return -EUNATCH; /* try not to shadow power_up errors */
        might_sleep();
        return platform_ops->power_up(cpu, cluster);
}

typedef void (*phys_reset_t)(unsigned long);

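/*
 * mcpm_cpu_power_down: power down the calling CPU
 *
 * Must be called with IRQs disabled.  This function does not return:
 * the CPU either powers off, to be brought back later through the MCPM
 * entry vector, or, if a concurrent power_up defeated the shutdown (see
 * the comment in the body below), resets and re-enters the kernel
 * through mcpm_entry_point.
 */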
void mcpm_cpu_power_down(void)
{
        phys_reset_t phys_reset;

        if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
                return;
        BUG_ON(!irqs_disabled());

        /*
         * Do this before calling into the power_down method,
         * as it might not always be safe to do afterwards.
         */
        setup_mm_for_reboot();

        platform_ops->power_down();

        /*
         * It is possible for a power_up request to happen concurrently
         * with a power_down request for the same CPU.  In this case the
         * power_down method might not be able to actually enter a
         * powered down state with the WFI instruction if the power_up
         * method has removed the required reset condition.  The
         * power_down method is then allowed to return.  We must perform
         * a re-entry in the kernel as if the power_up method had just
         * deasserted reset on the CPU.
         *
         * To simplify race issues, the platform specific implementation
         * must accommodate the possibility of unordered calls to
         * power_down and power_up with a usage count.  Therefore, if a
         * call to power_up is issued for a CPU that is not down, then
         * the next call to power_down must not attempt a full shutdown
         * but only do the minimum (normally disabling L1 cache and CPU
         * coherency) and return just as if a concurrent power_up request
         * had happened as described above.
         */

        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
        phys_reset(virt_to_phys(mcpm_entry_point));

        /* should never get here */
        BUG();
}

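/*
 * mcpm_cpu_power_down_finish: confirm that a CPU actually powered down
 *
 * Asks the platform backend (which may poll or wait as it sees fit)
 * whether the given CPU has reached its powered-down state, typically
 * after mcpm_cpu_power_down() was initiated on that CPU.  A warning is
 * logged on failure.
 */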
int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster)
{
        int ret;

        if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down_finish))
                return -EUNATCH;

        ret = platform_ops->power_down_finish(cpu, cluster);
        if (ret)
                pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
                        __func__, cpu, cluster, ret);

        return ret;
}

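/*
 * mcpm_cpu_suspend: bring the calling CPU down into a suspend state
 *
 * Like mcpm_cpu_power_down(), but @expected_residency (the time the CPU
 * is expected to stay down, in microseconds) is passed along so the
 * backend can choose an appropriate power state.  Must be called with
 * IRQs disabled; execution resumes through mcpm_entry_point.
 */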
void mcpm_cpu_suspend(u64 expected_residency)
{
        phys_reset_t phys_reset;

        if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
                return;
        BUG_ON(!irqs_disabled());

        /* Very similar to mcpm_cpu_power_down() */
        setup_mm_for_reboot();
        platform_ops->suspend(expected_residency);
        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
        phys_reset(virt_to_phys(mcpm_entry_point));
        BUG();
}

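/*
 * mcpm_cpu_powered_up: notify the backend from a freshly booted CPU
 *
 * Called by a CPU once it is back in the kernel after power-up or
 * resume, giving the backend a chance to do its bookkeeping.  The
 * powered_up hook is optional.
 */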
int mcpm_cpu_powered_up(void)
{
        if (!platform_ops)
                return -EUNATCH;
        if (platform_ops->powered_up)
                platform_ops->powered_up();
        return 0;
}

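/*
 * mcpm_sync holds the first man/last man coordination state for all
 * clusters and their CPUs.  It is accessed by CPUs running both with and
 * without caches enabled, hence the explicit cache maintenance around
 * every read and write in the helpers below.
 */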
struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTLR.C bit) is expected to still be active.
 */
void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
        mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
        sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}

/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTLR.C bit) is expected to be off.
 *    However L2 cache might or might not be active.
 */
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
        dmb();
        mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
        sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
        sev();
}

/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
        dmb();
        mcpm_sync.clusters[cluster].cluster = state;
        sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
        sev();
}

/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
        unsigned int i;
        struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

        /* Warn inbound CPUs that the cluster is being torn down: */
        c->cluster = CLUSTER_GOING_DOWN;
        sync_cache_w(&c->cluster);

        /* Back out if the inbound cluster is already in the critical region: */
        sync_cache_r(&c->inbound);
        if (c->inbound == INBOUND_COMING_UP)
                goto abort;

        /*
         * Wait for all CPUs to get out of the GOING_DOWN state, so that local
         * teardown is complete on each CPU before tearing down the cluster.
         *
         * If any CPU has been woken up again from the DOWN state, then we
         * shouldn't be taking the cluster down at all: abort in that case.
         */
        sync_cache_r(&c->cpus);
        for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
                int cpustate;

                if (i == cpu)
                        continue;

                while (1) {
                        cpustate = c->cpus[i].cpu;
                        if (cpustate != CPU_GOING_DOWN)
                                break;

                        wfe();
                        sync_cache_r(&c->cpus[i].cpu);
                }

                switch (cpustate) {
                case CPU_DOWN:
                        continue;

                default:
                        goto abort;
                }
        }

        return true;

abort:
        __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
        return false;
}

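/*
 * __mcpm_cluster_state: return the current state of the given cluster,
 * invalidating the local copy first so that an update made by a
 * cache-off CPU is observed.
 */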
int __mcpm_cluster_state(unsigned int cluster)
{
        sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
        return mcpm_sync.clusters[cluster].cluster;
}

extern unsigned long mcpm_power_up_setup_phys;

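/*
 * mcpm_sync_init: initialize the MCPM synchronization structures
 *
 * Marks the boot cluster and all currently online CPUs as up and
 * everything else as down, then optionally records the physical address
 * of @power_up_setup, the platform hook that the early entry code
 * invokes at each affinity level while bringing a CPU or cluster up.
 */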
int __init mcpm_sync_init(
        void (*power_up_setup)(unsigned int affinity_level))
{
        unsigned int i, j, mpidr, this_cluster;

        BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
        BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

        /*
         * Set initial CPU and cluster states.
         * Only one cluster is assumed to be active at this point.
         */
        for (i = 0; i < MAX_NR_CLUSTERS; i++) {
                mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
                mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
                for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
                        mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
        }
        mpidr = read_cpuid_mpidr();
        this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        for_each_online_cpu(i)
                mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
        mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
        sync_cache_w(&mcpm_sync);

        if (power_up_setup) {
                mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
                sync_cache_w(&mcpm_power_up_setup_phys);
        }

        return 0;
}