/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/idle.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/pm-cps.h>
#include <asm/smp-cps.h>
#include <asm/uasm.h>

/*
 * cps_nc_entry_fn - type of a generated non-coherent state entry function
 * @online: the count of online coupled VPEs
 * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
 *
 * The code entering & exiting non-coherent states is generated at runtime
 * using uasm, in order to ensure that the compiler cannot insert a stray
 * memory access at an unfortunate time and to allow the generation of optimal
 * core-specific code particularly for cache routines. If coupled_coherence
 * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
 * returns the number of VPEs that were in the wait state at the point this
 * VPE left it. Returns garbage if coupled_coherence is zero or this is not
 * the entry function for CPS_PM_NC_WAIT.
 */
typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);

/*
 * The entry point of the generated non-coherent idle state entry/exit
 * functions. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
				  nc_asm_enter);

/* Bitmap indicating which states are supported by the system */
DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);

/*
 * Indicates the number of coupled VPEs ready to operate in a non-coherent
 * state. Actually per-core rather than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);

/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);

/*
 * Used to synchronize entry to deep idle states. Actually per-core rather
 * than per-CPU.
 */
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);

/* Saved CPU state across the CPS_PM_POWER_GATED state */
DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);

/* A somewhat arbitrary number of labels & relocs for uasm */
static struct uasm_label labels[32] __initdata;
static struct uasm_reloc relocs[32] __initdata;

/* CPU-dependent sync types */
static unsigned stype_intervention;
static unsigned stype_memory;
static unsigned stype_ordering;

enum mips_reg {
	zero, at, v0, v1, a0, a1, a2, a3,
	t0, t1, t2, t3, t4, t5, t6, t7,
	s0, s1, s2, s3, s4, s5, s6, s7,
	t8, t9, k0, k1, gp, sp, fp, ra,
};

bool cps_pm_support_state(enum cps_pm_state state)
{
	return test_bit(state, state_support);
}

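/*
 * A two-phase rendezvous for the coupled VPEs of a core: each VPE
 * increments the counter & spins until all "online" VPEs have arrived,
 * then increments it again on the way out. The VPE whose second increment
 * brings the count to online * 2 resets the counter to zero for reuse;
 * counting both phases stops a fast VPE from racing back into the barrier
 * before a slower one has left it.
 */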
static void coupled_barrier(atomic_t *a, unsigned online)
{
	/*
	 * This function is effectively the same as
	 * cpuidle_coupled_parallel_barrier, which can't be used here since
	 * there's no cpuidle device.
	 */

	if (!coupled_coherence)
		return;

	smp_mb__before_atomic();
	atomic_inc(a);

	while (atomic_read(a) < online)
		cpu_relax();

	if (atomic_inc_return(a) == online * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > online)
		cpu_relax();
}

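/*
 * Enter the given idle state on the calling CPU. In outline: look up the
 * generated entry function for this core & state, compute the set of online
 * coupled VPEs, mark this CPU as potentially non-coherent, map the core's
 * ready_count through a non-coherent address, rendezvous with the coupled
 * VPEs & then run the uasm-generated code which performs the actual state
 * entry & exit.
 */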
int cps_pm_enter_state(enum cps_pm_state state)
{
	unsigned cpu = smp_processor_id();
	unsigned core = current_cpu_data.core;
	unsigned online, left;
	cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
	u32 *core_ready_count, *nc_core_ready_count;
	void *nc_addr;
	cps_nc_entry_fn entry;
	struct core_boot_config *core_cfg;
	struct vpe_boot_config *vpe_cfg;

	/* Check that there is an entry function for this state */
	entry = per_cpu(nc_asm_enter, core)[state];
	if (!entry)
		return -EINVAL;

	/* Calculate which coupled CPUs (VPEs) are online */
#ifdef CONFIG_MIPS_MT
	if (cpu_online(cpu)) {
		cpumask_and(coupled_mask, cpu_online_mask,
			    &cpu_sibling_map[cpu]);
		online = cpumask_weight(coupled_mask);
		cpumask_clear_cpu(cpu, coupled_mask);
	} else
#endif
	{
		cpumask_clear(coupled_mask);
		online = 1;
	}

	/* Set up the VPE to run mips_cps_pm_restore when started again */
	if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			return -EINVAL;

		core_cfg = &mips_cps_core_bootcfg[core];
		vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
		vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
		vpe_cfg->gp = (unsigned long)current_thread_info();
		vpe_cfg->sp = 0;
	}

	/* Indicate that this CPU might not be coherent */
	cpumask_clear_cpu(cpu, &cpu_coherent_mask);
	smp_mb__after_atomic();

	/* Create a non-coherent mapping of the core ready_count */
	core_ready_count = per_cpu(ready_count, core);
	nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
				   (unsigned long)core_ready_count);
	nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
	nc_core_ready_count = nc_addr;

	/* Ensure ready_count is zero-initialised before the assembly runs */
	ACCESS_ONCE(*nc_core_ready_count) = 0;
	coupled_barrier(&per_cpu(pm_barrier, core), online);

	/* Run the generated entry code */
	left = entry(online, nc_core_ready_count);

	/* Remove the non-coherent mapping of ready_count */
	kunmap_noncoherent();

	/* Indicate that this CPU is definitely coherent */
	cpumask_set_cpu(cpu, &cpu_coherent_mask);

	/*
	 * If this VPE is the first to leave the non-coherent wait state then
	 * it needs to wake up any coupled VPEs still running their wait
	 * instruction so that they return to cpuidle, which can then complete
	 * coordination between the coupled VPEs & provide the governor with
	 * a chance to reflect on the length of time the VPEs were in the
	 * idle state.
	 */
	if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
		arch_send_call_function_ipi_mask(coupled_mask);

	return 0;
}
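
/*
 * Usage sketch (illustrative; the real callers live outside this file): an
 * idle driver calls this on the CPU about to idle, typically with interrupts
 * disabled, e.g.
 *
 *	err = cps_pm_enter_state(CPS_PM_NC_WAIT);
 *
 * where a non-zero return means the state is unsupported on this core.
 */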
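/*
 * Generate an unrolled loop of index-type cache ops covering the entire
 * cache. Index ops take the way & index directly from the address bits, so
 * stepping an address from CKSEG0 to CKSEG0 + (ways << waybit) touches every
 * line of every way: e.g. a 4-way 32KiB cache has a way size of 8KiB, hence
 * waybit 13 & an address range of 4 << 13 = 0x8000 bytes.
 */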
static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
					 struct uasm_reloc **pr,
					 const struct cache_desc *cache,
					 unsigned op, int lbl)
{
	unsigned cache_size = cache->ways << cache->waybit;
	unsigned i;
	const unsigned unroll_lines = 32;

	/* If the cache isn't present this function has it easy */
	if (cache->flags & MIPS_CACHE_NOT_PRESENT)
		return;

	/* Load base address */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Calculate end address */
	if (cache_size < 0x8000)
		uasm_i_addiu(pp, t1, t0, cache_size);
	else
		UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));

	/* Start of cache op loop */
	uasm_build_label(pl, *pp, lbl);

	/* Generate the cache ops */
	for (i = 0; i < unroll_lines; i++)
		uasm_i_cache(pp, op, i * cache->linesz, t0);

	/* Update the base address */
	uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);

	/* Loop if we haven't reached the end address yet */
	uasm_il_bne(pp, pr, t0, t1, lbl);
	uasm_i_nop(pp);
}

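/*
 * Generate code which guards against an erratum whereby a prefetch result
 * left in the fill/store buffer (FSB) can leave the CPC sequencer stuck in
 * the D3 (ClrBus) state during power sequencing. The generated loop issues
 * loads to fill the FSB & invalidates the lines again, repeating until the
 * performance counter programmed to count FSB-full pipeline stalls becomes
 * non-zero, i.e. until the FSB has demonstrably been filled by these
 * harmless loads & so cannot still hold a prefetch result.
 */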
static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
				    struct uasm_reloc **pr,
				    const struct cpuinfo_mips *cpu_info,
				    int lbl)
{
	unsigned i, fsb_size = 8;
	unsigned num_loads = (fsb_size * 3) / 2;
	unsigned line_stride = 2;
	unsigned line_size = cpu_info->dcache.linesz;
	unsigned perf_counter, perf_event;
	unsigned revision = cpu_info->processor_id & PRID_REV_MASK;

	/*
	 * Determine whether this CPU requires an FSB flush, and if so which
	 * performance counter/event reflect stalls due to a full FSB.
	 */
	switch (__get_cpu_type(cpu_info->cputype)) {
	case CPU_INTERAPTIV:
		perf_counter = 1;
		perf_event = 51;
		break;

	case CPU_PROAPTIV:
		/* Newer proAptiv cores don't require this workaround */
		if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
			return 0;

		/* On older ones it's unavailable */
		return -1;

	/* CPUs which do not require the workaround */
	case CPU_P5600:
	case CPU_I6400:
		return 0;

	default:
		WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n");
		return -1;
	}

	/*
	 * Ensure that the fill/store buffer (FSB) is not holding the results
	 * of a prefetch, since if it is then the CPC sequencer may become
	 * stuck in the D3 (ClrBus) state whilst entering a low power state.
	 */

	/* Preserve perf counter setup */
	uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */

	/* Set up the perf counter to count FSB full pipeline stalls */
	uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
	uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
	uasm_i_ehb(pp);

	/* Base address for loads */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Start of clear loop */
	uasm_build_label(pl, *pp, lbl);

	/* Perform some loads to fill the FSB */
	for (i = 0; i < num_loads; i++)
		uasm_i_lw(pp, zero, i * line_size * line_stride, t0);

	/*
	 * Invalidate the new D-cache entries so that the cache will need
	 * refilling (via the FSB) if the loop is executed again.
	 */
	for (i = 0; i < num_loads; i++) {
		uasm_i_cache(pp, Hit_Invalidate_D,
			     i * line_size * line_stride, t0);
		uasm_i_cache(pp, Hit_Writeback_Inv_SD,
			     i * line_size * line_stride, t0);
	}

	/* Completion barrier */
	uasm_i_sync(pp, stype_memory);
	uasm_i_ehb(pp);

	/* Check whether the pipeline stalled due to the FSB being full */
	uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */

	/* Loop if it didn't */
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);

	/* Restore perf counter 1. The count may well now be wrong... */
	uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
	uasm_i_ehb(pp);

	return 0;
}

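/*
 * Generate an LL/SC loop which atomically sets bit 31 of the word addressed
 * by register r_addr. This top bit of ready_count is the signal which
 * releases any coupled VPEs polling at lbl_poll_cont in cps_gen_entry_code
 * below.
 */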
static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
				       struct uasm_reloc **pr,
				       unsigned r_addr, int lbl)
{
	uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
	uasm_build_label(pl, *pp, lbl);
	uasm_i_ll(pp, t1, 0, r_addr);
	uasm_i_or(pp, t1, t1, t0);
	uasm_i_sc(pp, t1, 0, r_addr);
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);
}

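/*
 * Generate the entry/exit code for the given CPU's core & the given state.
 * The generated code uses ready_count as follows: the low bits count the
 * VPEs which have arrived at the entry function, while the top bit is set
 * by the VPE which disables coherence to signal the other VPEs that it is
 * safe for them to continue.
 */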
static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *buf, *p;
	const unsigned r_online = a0;
	const unsigned r_nc_count = a1;
	const unsigned r_pcohctl = t7;
	const unsigned max_instrs = 256;
	unsigned cpc_cmd;
	int err;
	enum {
		lbl_incready = 1,
		lbl_poll_cont,
		lbl_secondary_hang,
		lbl_disable_coherence,
		lbl_flush_fsb,
		lbl_invicache,
		lbl_flushdcache,
		lbl_hang,
		lbl_set_cont,
		lbl_secondary_cont,
		lbl_decready,
	};

	/* Allocate a buffer to hold the generated code */
	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Clear labels & relocs ready for (re)use */
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			goto out_err;

		/*
		 * Save CPU state. Note the non-standard calling convention
		 * with the return address placed in v0 to avoid clobbering
		 * the ra register before it is saved.
		 */
		UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
		uasm_i_jalr(&p, v0, t0);
		uasm_i_nop(&p);
	}

	/*
	 * Load addresses of required CM & CPC registers. This is done early
	 * because they're needed in both the enable & disable coherence steps
	 * but in the coupled case the enable step will only run on one VPE.
	 */
	UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());

	if (coupled_coherence) {
		/* Increment ready_count */
		uasm_i_sync(&p, stype_ordering);
		uasm_build_label(&l, p, lbl_incready);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, 1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_incready);
		uasm_i_addiu(&p, t1, t1, 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);

		/*
		 * If this is the last VPE to become ready for non-coherence
		 * then it should branch below.
		 */
		uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
		uasm_i_nop(&p);

		if (state < CPS_PM_POWER_GATED) {
			/*
			 * Otherwise this is not the last VPE to become ready
			 * for non-coherence. It needs to wait until coherence
			 * has been disabled before proceeding, which it will do
			 * by polling for the top bit of ready_count being set.
			 */
			uasm_i_addiu(&p, t1, zero, -1);
			uasm_build_label(&l, p, lbl_poll_cont);
			uasm_i_lw(&p, t0, 0, r_nc_count);
			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
			uasm_i_ehb(&p);
			uasm_i_yield(&p, zero, t1);
			uasm_il_b(&p, &r, lbl_poll_cont);
			uasm_i_nop(&p);
		} else {
			/*
			 * The core will lose power & this VPE will not continue
			 * so it can simply halt here.
			 */
			uasm_i_addiu(&p, t0, zero, TCHALT_H);
			uasm_i_mtc0(&p, t0, 2, 4);
			uasm_build_label(&l, p, lbl_secondary_hang);
			uasm_il_b(&p, &r, lbl_secondary_hang);
			uasm_i_nop(&p);
		}
	}

	/*
	 * This is the point of no return - this VPE will now proceed to
	 * disable coherence. At this point we *must* be sure that no other
	 * VPE within the core will interfere with the L1 dcache.
	 */
	uasm_build_label(&l, p, lbl_disable_coherence);

	/* Invalidate the L1 icache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
			      Index_Invalidate_I, lbl_invicache);

	/* Writeback & invalidate the L1 dcache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
			      Index_Writeback_Inv_D, lbl_flushdcache);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	/*
	 * Disable all but self interventions. The load from COHCTL is defined
	 * by the interAptiv & proAptiv SUMs as ensuring that the operation
	 * resulting from the preceding store is complete.
	 */
	uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Sync to ensure previous interventions are complete */
	uasm_i_sync(&p, stype_intervention);
	uasm_i_ehb(&p);

	/* Disable coherence */
	uasm_i_sw(&p, zero, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	if (state >= CPS_PM_CLOCK_GATED) {
		err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
					lbl_flush_fsb);
		if (err)
			goto out_err;

		/* Determine the CPC command to issue */
		switch (state) {
		case CPS_PM_CLOCK_GATED:
			cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
			break;
		case CPS_PM_POWER_GATED:
			cpc_cmd = CPC_Cx_CMD_PWRDOWN;
			break;
		default:
			BUG();
			goto out_err;
		}

		/* Issue the CPC command */
		UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
		uasm_i_addiu(&p, t1, zero, cpc_cmd);
		uasm_i_sw(&p, t1, 0, t0);

		if (state == CPS_PM_POWER_GATED) {
			/* If anything goes wrong just hang */
			uasm_build_label(&l, p, lbl_hang);
			uasm_il_b(&p, &r, lbl_hang);
			uasm_i_nop(&p);

			/*
			 * There's no point generating more code, the core is
			 * powered down & if powered back up will run from the
			 * reset vector not from here.
			 */
			goto gen_done;
		}

		/* Completion barrier */
		uasm_i_sync(&p, stype_memory);
		uasm_i_ehb(&p);
	}

	if (state == CPS_PM_NC_WAIT) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		if (coupled_coherence)
			cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
					    lbl_set_cont);

		/*
		 * VPEs which did not disable coherence will continue
		 * executing, after coherence has been disabled, from this
		 * point.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Now perform our wait */
		uasm_i_wait(&p, 0);
	}

	/*
	 * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
	 * will run this. The first will actually re-enable coherence & the
	 * rest will just be performing a rather unusual nop.
	 */
	uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
		/* Decrement ready_count */
		uasm_build_label(&l, p, lbl_decready);
		uasm_i_sync(&p, stype_ordering);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, -1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_decready);
		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

		/*
		 * This core will be reliant upon another core sending a
		 * power-up command to the CPC in order to resume operation.
		 * Thus an arbitrary VPE can't trigger the core leaving the
		 * idle state and the one that disables coherence might as well
		 * be the one to re-enable it. The rest will continue from here
		 * after that has been done.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	/* The core is coherent, time to return to C code */
	uasm_i_jr(&p, ra);
	uasm_i_nop(&p);

gen_done:
	/* Ensure the code didn't exceed the resources allocated for it */
	BUG_ON((p - buf) > max_instrs);
	BUG_ON((l - labels) > ARRAY_SIZE(labels));
	BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

	/* Patch branch offsets */
	uasm_resolve_relocs(relocs, labels);

	/* Flush the icache */
	local_flush_icache_range((unsigned long)buf, (unsigned long)p);

	return buf;
out_err:
	kfree(buf);
	return NULL;
}

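/*
 * Generate the entry functions for each supported state of the given CPU's
 * core & allocate the core's ready_count. The per-CPU variables here are
 * deliberately indexed by core number, so this work happens once per core
 * rather than once per CPU.
 */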
static int __init cps_gen_core_entries(unsigned cpu)
{
	enum cps_pm_state state;
	unsigned core = cpu_data[cpu].core;
	unsigned dlinesz = cpu_data[cpu].dcache.linesz;
	void *entry_fn, *core_rc;

	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
		if (per_cpu(nc_asm_enter, core)[state])
			continue;
		if (!test_bit(state, state_support))
			continue;

		entry_fn = cps_gen_entry_code(cpu, state);
		if (!entry_fn) {
			pr_err("Failed to generate core %u state %u entry\n",
			       core, state);
			clear_bit(state, state_support);
		}

		per_cpu(nc_asm_enter, core)[state] = entry_fn;
	}

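	/*
	 * Allocate two cache lines' worth of memory so that a full,
	 * cacheline-aligned line is guaranteed to lie within the buffer:
	 * ready_count holds the aligned address while ready_count_alloc
	 * retains the original pointer.
	 */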
	if (!per_cpu(ready_count, core)) {
		core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
		if (!core_rc) {
			pr_err("Failed to allocate core %u ready_count\n",
			       core);
			return -ENOMEM;
		}
		per_cpu(ready_count_alloc, core) = core_rc;

		/* Ensure ready_count is aligned to a cacheline boundary */
		core_rc += dlinesz - 1;
		core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
		per_cpu(ready_count, core) = core_rc;
	}

	return 0;
}

static int __init cps_pm_init(void)
{
	unsigned cpu;
	int err;

	/* Detect appropriate sync types for the system */
	switch (current_cpu_data.cputype) {
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_M5150:
	case CPU_P5600:
	case CPU_I6400:
		stype_intervention = 0x2;
		stype_memory = 0x3;
		stype_ordering = 0x10;
		break;

	default:
		pr_warn("Power management is using heavyweight sync 0\n");
	}

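	/*
	 * Note: stypes 0x2 & 0x3 are, to the best of our knowledge,
	 * implementation-defined SYNC variants acting as intervention &
	 * memory completion barriers on these cores, whilst 0x10 is the
	 * architecturally-defined lightweight ordering barrier. When left
	 * at zero all three degrade to the heavyweight SYNC 0.
	 */
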
	/* A CM is required for all non-coherent states */
	if (!mips_cm_present()) {
		pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
		goto out;
	}

	/*
	 * If interrupts were enabled whilst running a wait instruction on a
	 * non-coherent core then the VPE may end up processing interrupts
	 * whilst non-coherent. That would be bad.
	 */
	if (cpu_wait == r4k_wait_irqoff)
		set_bit(CPS_PM_NC_WAIT, state_support);
	else
		pr_warn("pm-cps: non-coherent wait unavailable\n");

	/* Detect whether a CPC is present */
	if (mips_cpc_present()) {
		/* Detect whether clock gating is implemented */
		if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK)
			set_bit(CPS_PM_CLOCK_GATED, state_support);
		else
			pr_warn("pm-cps: CPC does not support clock gating\n");

		/* Power gating is available with CPS SMP & any CPC */
		if (mips_cps_smp_in_use())
			set_bit(CPS_PM_POWER_GATED, state_support);
		else
			pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
	} else {
		pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
	}

	for_each_present_cpu(cpu) {
		err = cps_gen_core_entries(cpu);
		if (err)
			return err;
	}
out:
	return 0;
}
arch_initcall(cps_pm_init);