/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>

/* Offsets of per-core-local CM GCR registers from the GCR base address */
#define GCR_CL_COHERENCE_OFS	0x2008
#define GCR_CL_ID_OFS		0x2028

	.extern mips_cm_base

	/* Branch delay slots are explicit throughout this file */
	.set	noreorder

	/*
	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
	 * MT is not supported then branch to nomt.
	 *
	 * Walks the Config register chain: each ConfigN has its top (M) bit
	 * set iff ConfigN+1 exists, so a non-negative value means the chain
	 * ends before Config3 and MT cannot be present. Config3.MT is the
	 * actual MT ASE presence bit.
	 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG
	bgez	\dest, \nomt		/* no Config1 => no MT */
	 mfc0	\dest, CP0_CONFIG, 1	/* delay slot */
	bgez	\dest, \nomt		/* no Config2 => no MT */
	 mfc0	\dest, CP0_CONFIG, 2	/* delay slot */
	bgez	\dest, \nomt		/* no Config3 => no MT */
	 mfc0	\dest, CP0_CONFIG, 3	/* delay slot */
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt		/* caller supplies the delay slot */
	.endm
0ee958e1 PB |
42 | |
43 | .section .text.cps-vec | |
44 | .balign 0x1000 | |
0ee958e1 PB |
45 | |
46 | LEAF(mips_cps_core_entry) | |
47 | /* | |
0155a065 PB |
48 | * These first 12 bytes will be patched by cps_smp_setup to load the |
49 | * base address of the CM GCRs into register v1 and the CCA to use into | |
50 | * register s0. | |
0ee958e1 PB |
51 | */ |
52 | .quad 0 | |
0155a065 | 53 | .word 0 |
0ee958e1 PB |
54 | |
55 | /* Check whether we're here due to an NMI */ | |
56 | mfc0 k0, CP0_STATUS | |
57 | and k0, k0, ST0_NMI | |
58 | beqz k0, not_nmi | |
59 | nop | |
60 | ||
61 | /* This is an NMI */ | |
62 | la k0, nmi_handler | |
63 | jr k0 | |
64 | nop | |
65 | ||
66 | not_nmi: | |
67 | /* Setup Cause */ | |
68 | li t0, CAUSEF_IV | |
69 | mtc0 t0, CP0_CAUSE | |
70 | ||
71 | /* Setup Status */ | |
72 | li t0, ST0_CU1 | ST0_CU0 | |
73 | mtc0 t0, CP0_STATUS | |
74 | ||
75 | /* | |
76 | * Clear the bits used to index the caches. Note that the architecture | |
77 | * dictates that writing to any of TagLo or TagHi selects 0 or 2 should | |
78 | * be valid for all MIPS32 CPUs, even those for which said writes are | |
79 | * unnecessary. | |
80 | */ | |
81 | mtc0 zero, CP0_TAGLO, 0 | |
82 | mtc0 zero, CP0_TAGHI, 0 | |
83 | mtc0 zero, CP0_TAGLO, 2 | |
84 | mtc0 zero, CP0_TAGHI, 2 | |
85 | ehb | |
86 | ||
87 | /* Primary cache configuration is indicated by Config1 */ | |
88 | mfc0 v0, CP0_CONFIG, 1 | |
89 | ||
90 | /* Detect I-cache line size */ | |
91 | _EXT t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ | |
92 | beqz t0, icache_done | |
93 | li t1, 2 | |
94 | sllv t0, t1, t0 | |
95 | ||
96 | /* Detect I-cache size */ | |
97 | _EXT t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ | |
98 | xori t2, t1, 0x7 | |
99 | beqz t2, 1f | |
100 | li t3, 32 | |
101 | addi t1, t1, 1 | |
102 | sllv t1, t3, t1 | |
103 | 1: /* At this point t1 == I-cache sets per way */ | |
104 | _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ | |
105 | addi t2, t2, 1 | |
106 | mul t1, t1, t0 | |
107 | mul t1, t1, t2 | |
108 | ||
109 | li a0, KSEG0 | |
110 | add a1, a0, t1 | |
111 | 1: cache Index_Store_Tag_I, 0(a0) | |
112 | add a0, a0, t0 | |
113 | bne a0, a1, 1b | |
114 | nop | |
115 | icache_done: | |
116 | ||
117 | /* Detect D-cache line size */ | |
118 | _EXT t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ | |
119 | beqz t0, dcache_done | |
120 | li t1, 2 | |
121 | sllv t0, t1, t0 | |
122 | ||
123 | /* Detect D-cache size */ | |
124 | _EXT t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ | |
125 | xori t2, t1, 0x7 | |
126 | beqz t2, 1f | |
127 | li t3, 32 | |
128 | addi t1, t1, 1 | |
129 | sllv t1, t3, t1 | |
130 | 1: /* At this point t1 == D-cache sets per way */ | |
131 | _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ | |
132 | addi t2, t2, 1 | |
133 | mul t1, t1, t0 | |
134 | mul t1, t1, t2 | |
135 | ||
136 | li a0, KSEG0 | |
137 | addu a1, a0, t1 | |
138 | subu a1, a1, t0 | |
139 | 1: cache Index_Store_Tag_D, 0(a0) | |
140 | bne a0, a1, 1b | |
141 | add a0, a0, t0 | |
142 | dcache_done: | |
143 | ||
0155a065 | 144 | /* Set Kseg0 CCA to that in s0 */ |
0ee958e1 PB |
145 | mfc0 t0, CP0_CONFIG |
146 | ori t0, 0x7 | |
0155a065 PB |
147 | xori t0, 0x7 |
148 | or t0, t0, s0 | |
0ee958e1 PB |
149 | mtc0 t0, CP0_CONFIG |
150 | ehb | |
151 | ||
152 | /* Enter the coherent domain */ | |
153 | li t0, 0xff | |
154 | sw t0, GCR_CL_COHERENCE_OFS(v1) | |
155 | ehb | |
156 | ||
157 | /* Jump to kseg0 */ | |
158 | la t0, 1f | |
159 | jr t0 | |
160 | nop | |
161 | ||
245a7868 PB |
162 | /* |
163 | * We're up, cached & coherent. Perform any further required core-level | |
164 | * initialisation. | |
165 | */ | |
166 | 1: jal mips_cps_core_init | |
167 | nop | |
0ee958e1 PB |
168 | |
169 | /* | |
245a7868 PB |
170 | * Boot any other VPEs within this core that should be online, and |
171 | * deactivate this VPE if it should be offline. | |
0ee958e1 | 172 | */ |
245a7868 PB |
173 | jal mips_cps_boot_vpes |
174 | nop | |
0ee958e1 PB |
175 | |
176 | /* Off we go! */ | |
245a7868 PB |
177 | lw t1, VPEBOOTCFG_PC(v0) |
178 | lw gp, VPEBOOTCFG_GP(v0) | |
179 | lw sp, VPEBOOTCFG_SP(v0) | |
0ee958e1 PB |
180 | jr t1 |
181 | nop | |
182 | END(mips_cps_core_entry) | |
183 | ||
184 | .org 0x200 | |
185 | LEAF(excep_tlbfill) | |
186 | b . | |
187 | nop | |
188 | END(excep_tlbfill) | |
189 | ||
190 | .org 0x280 | |
191 | LEAF(excep_xtlbfill) | |
192 | b . | |
193 | nop | |
194 | END(excep_xtlbfill) | |
195 | ||
196 | .org 0x300 | |
197 | LEAF(excep_cache) | |
198 | b . | |
199 | nop | |
200 | END(excep_cache) | |
201 | ||
202 | .org 0x380 | |
203 | LEAF(excep_genex) | |
204 | b . | |
205 | nop | |
206 | END(excep_genex) | |
207 | ||
208 | .org 0x400 | |
209 | LEAF(excep_intex) | |
210 | b . | |
211 | nop | |
212 | END(excep_intex) | |
213 | ||
214 | .org 0x480 | |
215 | LEAF(excep_ejtag) | |
216 | la k0, ejtag_debug_handler | |
217 | jr k0 | |
218 | nop | |
219 | END(excep_ejtag) | |
245a7868 PB |
220 | |
/*
 * void mips_cps_core_init(void)
 *
 * One-off per-core initialisation. When the core implements the MT ASE,
 * every VPE other than VPE0 is placed into a halted, non-active state so
 * that it can be brought online individually later; otherwise this is a
 * no-op. Clobbers t0, t1, t5, t7 and (with MT) CP0 MT state.
 */
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f
	 nop				/* delay slot of has_mt's beqz */

	.set	push
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	la	t1, 1f
	jr.hb	t1			/* clear execution hazards */
	 nop				/* delay slot */

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core; t7 = VPE count */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addi	t7, t0, 1		/* PVPE field is count - 1 */

	/* If there's only 1, we're done */
	beqz	t0, 2f
	 nop				/* delay slot */

	/* Loop through each VPE within this core; t5 = VPE index, from 1 */
	li	t5, 1

1:	/* Operate on the appropriate TC (TC index == VPE index here) */
	mtc0	t5, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	t5, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, t5, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addi	t5, t5, 1
	slt	t0, t5, t7
	bnez	t0, 1b
	 nop				/* delay slot */

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	 nop				/* delay slot */
	END(mips_cps_core_init)
293 | ||
/*
 * void mips_cps_boot_vpes(void)
 *
 * Starts any other VPEs within this core whose bit is set in the core's
 * boot-config VPE mask, and returns with v0 pointing at this VPE's
 * struct vpe_boot_config (the caller reads its PC/SP/GP fields). If this
 * VPE itself is not in the mask, its TC is halted here instead of
 * returning. Clobbers t0-t2, t5-t9, v0 and (with MT) CP0 MT state.
 */
LEAF(mips_cps_boot_vpes)
	/* Retrieve CM base address */
	la	t0, mips_cm_base
	lw	t0, 0(t0)

	/* Calculate a pointer to this cores struct core_boot_config (t0) */
	lw	t0, GCR_CL_ID_OFS(t0)	/* t0 = this core's ID */
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	la	t1, mips_cps_core_bootcfg
	lw	t1, 0(t1)
	addu	t0, t0, t1

	/* Calculate this VPEs ID (t9). If the core doesn't support MT use 0 */
	has_mt	t6, 1f
	 li	t9, 0			/* delay slot of has_mt's beqz */

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addi	t1, t1, 1		/* PVPE field is count - 1 */

	/* Calculate a mask for the VPE ID from EBase.CPUNum:
	 * t1 = (1 << ceil(log2(nvpes))) - 1 */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1

	/* Retrieve the VPE ID from EBase.CPUNum */
	mfc0	t9, $15, 1		/* CP0 EBase */
	and	t9, t9, t1

1:	/* Calculate a pointer to this VPEs struct vpe_boot_config (v0) */
	li	t1, VPEBOOTCFG_SIZE
	mul	v0, t9, t1
	lw	t7, COREBOOTCFG_VPECONFIG(t0)	/* t7 = vpe_config array */
	addu	v0, v0, t7

#ifdef CONFIG_MIPS_MT

	/* If the core doesn't support MT then return */
	bnez	t6, 1f
	 nop				/* delay slot */
	jr	ra
	 nop				/* delay slot */

	.set	push
	.set	mt

1:	/* Enter VPE configuration state */
	dvpe
	la	t1, 1f
	jr.hb	t1			/* clear execution hazards */
	 nop				/* delay slot */
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE: t6 = remaining mask bits, t8 = full mask,
	 * t5 = VPE index */
	lw	t6, COREBOOTCFG_VPEMASK(t0)
	move	t8, t6
	li	t5, 0

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, t6, 1
	beqz	t0, 2f
	 nop				/* delay slot */

	/* Operate on the appropriate TC: clear then set TargTC = t5 */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, t5
	mtc0	t0, CP0_VPECONTROL
	ehb

	/* Skip the VPE if its TC is not halted */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	 nop				/* delay slot */

	/* Calculate a pointer to the VPEs struct vpe_boot_config (t0) */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, t5
	addu	t0, t0, t7

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	t6, t6, 1
	addi	t5, t5, 1
	bnez	t6, 1b
	 nop				/* delay slot */

	/* Leave VPE configuration state & re-enable other VPEs */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	/* Check whether this VPE (ID t9) is meant to be running */
	li	t0, 1
	sll	t0, t0, t9
	and	t0, t0, t8
	bnez	t0, 2f
	 nop				/* delay slot */

	/* This VPE should be offline, halt the TC (execution stops once the
	 * halt takes effect; the jr.hb clears hazards until then) */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	la	t0, 1f
1:	jr.hb	t0
	 nop				/* delay slot */

2:	.set	pop

#endif /* CONFIG_MIPS_MT */

	/* Return */
	jr	ra
	 nop				/* delay slot */
	END(mips_cps_boot_vpes)
3179d37e PB |
454 | |
#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)

	/*
	 * Calculate a pointer to this CPUs struct mips_static_suspend_state:
	 * dest = &cps_cpu_state + __per_cpu_offset[cpu]. Uses $1 (at) as
	 * scratch, hence the noat section.
	 */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)		/* $1 = this thread's CPU number */
	sll	$1, $1, LONGLOG		/* scale to a long-sized index */
	la	\dest, __per_cpu_offset
	addu	$1, $1, \dest
	lw	$1, 0($1)		/* $1 = __per_cpu_offset[cpu] */
	la	\dest, cps_cpu_state
	addu	\dest, \dest, $1	/* dest = per-CPU cps_cpu_state */
	.set	pop
	.endm
470 | ||
/*
 * Save CPU state ahead of a power-management state transition, then jump
 * to the continuation address in v0 (supplied by the caller).
 */
LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1			/* t1 = per-CPU suspend state */
	SUSPEND_SAVE_STATIC
	jr	v0
	 nop				/* delay slot */
	END(mips_cps_pm_save)
479 | ||
/*
 * Restore CPU state after a power-management state transition; the
 * RESUME_RESTORE_REGS_RETURN macro performs the return to the caller.
 */
LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1			/* t1 = per-CPU suspend state */
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)

#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */