Commit | Line | Data |
---|---|---|
14cf11af PM |
1 | /* |
2 | * PowerPC version | |
3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | |
4 | * | |
5 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP | |
6 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> | |
7 | * Adapted for Power Macintosh by Paul Mackerras. | |
8 | * Low-level exception handlers and MMU support | |
9 | * rewritten by Paul Mackerras. | |
10 | * Copyright (C) 1996 Paul Mackerras. | |
11 | * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net). | |
14cf11af PM |
12 | * |
13 | * This file contains the low-level support and setup for the | |
14 | * PowerPC platform, including trap and interrupt dispatch. | |
15 | * (The PPC 8xx embedded CPUs use head_8xx.S instead.) | |
16 | * | |
17 | * This program is free software; you can redistribute it and/or | |
18 | * modify it under the terms of the GNU General Public License | |
19 | * as published by the Free Software Foundation; either version | |
20 | * 2 of the License, or (at your option) any later version. | |
21 | * | |
22 | */ | |
23 | ||
b3b8dc6c | 24 | #include <asm/reg.h> |
14cf11af PM |
25 | #include <asm/page.h> |
26 | #include <asm/mmu.h> | |
27 | #include <asm/pgtable.h> | |
28 | #include <asm/cputable.h> | |
29 | #include <asm/cache.h> | |
30 | #include <asm/thread_info.h> | |
31 | #include <asm/ppc_asm.h> | |
32 | #include <asm/asm-offsets.h> | |
ec2b36b9 | 33 | #include <asm/ptrace.h> |
5e696617 | 34 | #include <asm/bug.h> |
14cf11af | 35 | |
14cf11af PM |
36 | /* The 601 has only IBATs; cr0.eq is set on 601 when using this macro */
37 | #define LOAD_BAT(n, reg, RA, RB) \ | |
38 | /* see the comment for clear_bats() -- Cort */ \ | |
39 | li RA,0; \ | |
40 | mtspr SPRN_IBAT##n##U,RA; \ | |
41 | mtspr SPRN_DBAT##n##U,RA; \ | |
42 | lwz RA,(n*16)+0(reg); \ | |
43 | lwz RB,(n*16)+4(reg); \ | |
44 | mtspr SPRN_IBAT##n##U,RA; \ | |
45 | mtspr SPRN_IBAT##n##L,RB; \ | |
46 | beq 1f; \ | |
47 | lwz RA,(n*16)+8(reg); \ | |
48 | lwz RB,(n*16)+12(reg); \ | |
49 | mtspr SPRN_DBAT##n##U,RA; \ | |
50 | mtspr SPRN_DBAT##n##L,RB; \ | |
51 | 1: | |
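/* For reference, LOAD_BAT(0,r3,r4,r5) expands to roughly:
 *	li r4,0; mtspr SPRN_IBAT0U,r4; mtspr SPRN_DBAT0U,r4	(invalidate first)
 *	lwz r4,0(r3); lwz r5,4(r3)
 *	mtspr SPRN_IBAT0U,r4; mtspr SPRN_IBAT0L,r5
 *	beq 1f				(601: no DBATs, so stop here)
 *	lwz r4,8(r3); lwz r5,12(r3)
 *	mtspr SPRN_DBAT0U,r4; mtspr SPRN_DBAT0L,r5
 * i.e. each entry in the table at (reg) is 16 bytes: IBATu, IBATl, DBATu, DBATl.
 */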
14cf11af | 52 | |
748a7683 | 53 | .section .text.head, "ax" |
b3b8dc6c PM |
54 | .stabs "arch/powerpc/kernel/",N_SO,0,0,0f |
55 | .stabs "head_32.S",N_SO,0,0,0f | |
14cf11af | 56 | 0: |
748a7683 | 57 | _ENTRY(_stext); |
14cf11af PM |
58 | |
59 | /* | |
60 | * _start is defined this way because the XCOFF loader in the OpenFirmware | |
61 | * on the powermac expects the entry point to be a procedure descriptor. | |
62 | */ | |
748a7683 | 63 | _ENTRY(_start); |
14cf11af PM |
64 | /* |
65 | * These are here for legacy reasons; the kernel used to
66 | * need to look like a COFF function entry for the pmac,
67 | * but we're always started by some kind of bootloader now.
68 | * -- Cort | |
69 | */ | |
70 | nop /* used by __secondary_hold on prep (mtx) and chrp smp */ | |
71 | nop /* used by __secondary_hold on prep (mtx) and chrp smp */ | |
72 | nop | |
73 | ||
74 | /* PMAC | |
75 | * Enter here with the kernel text, data and bss loaded starting at | |
76 | * 0, running with virtual == physical mapping. | |
77 | * r5 points to the prom entry point (the client interface handler | |
78 | * address). Address translation is turned on, with the prom | |
79 | * managing the hash table. Interrupts are disabled. The stack | |
80 | * pointer (r1) points to just below the end of the half-meg region | |
81 | * from 0x380000 - 0x400000, which is mapped in already. | |
82 | * | |
83 | * If we are booted from MacOS via BootX, we enter with the kernel | |
84 | * image loaded somewhere, and the following values in registers: | |
85 | * r3: 'BooX' (0x426f6f58) | |
86 | * r4: virtual address of boot_infos_t | |
87 | * r5: 0 | |
88 | * | |
14cf11af PM |
89 | * PREP |
90 | * This is jumped to on prep systems right after the kernel is relocated | |
91 | * to its proper place in memory by the boot loader. The expected layout | |
92 | * of the regs is: | |
93 | * r3: ptr to residual data | |
94 | * r4: initrd_start or if no initrd then 0 | |
95 | * r5: initrd_end - unused if r4 is 0 | |
96 | * r6: Start of command line string | |
97 | * r7: End of command line string | |
98 | * | |
99 | * This just sets up a minimal MMU environment so we can call
100 | * start_here() to do the real work. | |
101 | * -- Cort | |
102 | */ | |
103 | ||
104 | .globl __start | |
105 | __start: | |
106 | /* | |
107 | * We have to do any OF calls before we map ourselves to KERNELBASE, | |
108 | * because OF may have I/O devices mapped into that area | |
109 | * (particularly on CHRP). | |
110 | */ | |
9b6b563c PM |
111 | cmpwi 0,r5,0 |
112 | beq 1f | |
2bda347b | 113 | |
28794d34 | 114 | #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE |
2bda347b BH |
115 | /* find out where we are now */ |
116 | bcl 20,31,$+4 | |
117 | 0: mflr r8 /* r8 = runtime addr here */ | |
118 | addis r8,r8,(_stext - 0b)@ha | |
119 | addi r8,r8,(_stext - 0b)@l /* current runtime base addr */ | |
9b6b563c | 120 | bl prom_init |
28794d34 BH |
121 | #endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */ |
122 | ||
123 | /* We never return. We also hit that trap if trying to boot | |
124 | * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */ | |
9b6b563c PM |
125 | trap |
126 | ||
d7f39454 BH |
127 | /* |
128 | * Check for BootX signature when supporting PowerMac and branch to | |
129 | * appropriate trampoline if it's present | |
130 | */ | |
131 | #ifdef CONFIG_PPC_PMAC | |
132 | 1: lis r31,0x426f | |
133 | ori r31,r31,0x6f58 | |
134 | cmpw 0,r3,r31 | |
135 | bne 1f | |
136 | bl bootx_init | |
137 | trap | |
138 | #endif /* CONFIG_PPC_PMAC */ | |
139 | ||
9b6b563c | 140 | 1: mr r31,r3 /* save parameters */ |
14cf11af | 141 | mr r30,r4 |
14cf11af PM |
142 | li r24,0 /* cpu # */ |
143 | ||
144 | /* | |
145 | * early_init() does the early machine identification and does | |
146 | * the necessary low-level setup and clears the BSS | |
147 | * -- Cort <cort@fsmlabs.com> | |
148 | */ | |
149 | bl early_init | |
150 | ||
14cf11af PM |
151 | /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains |
152 | * the physical address we are running at, returned by early_init() | |
153 | */ | |
154 | bl mmu_off | |
155 | __after_mmu_off: | |
14cf11af PM |
156 | bl clear_bats |
157 | bl flush_tlbs | |
158 | ||
159 | bl initial_bats | |
f21f49ea | 160 | #if defined(CONFIG_BOOTX_TEXT) |
51d3082f BH |
161 | bl setup_disp_bat |
162 | #endif | |
c374e00e SW |
163 | #ifdef CONFIG_PPC_EARLY_DEBUG_CPM |
164 | bl setup_cpm_bat | |
165 | #endif | |
14cf11af PM |
166 | |
167 | /* | |
168 | * Call setup_cpu for CPU 0 and initialize 6xx Idle | |
169 | */ | |
170 | bl reloc_offset | |
171 | li r24,0 /* cpu# */ | |
172 | bl call_setup_cpu /* Call setup_cpu for this CPU */ | |
173 | #ifdef CONFIG_6xx | |
174 | bl reloc_offset | |
175 | bl init_idle_6xx | |
176 | #endif /* CONFIG_6xx */ | |
14cf11af PM |
177 | |
178 | ||
14cf11af PM |
179 | /* |
180 | * We need to run with _start at physical address 0. | |
181 | * On CHRP, we are loaded at 0x10000 since OF on CHRP uses | |
182 | * the exception vectors at 0 (and therefore this copy | |
183 | * overwrites OF's exception vectors with our own). | |
9b6b563c | 184 | * The MMU is off at this point. |
14cf11af PM |
185 | */ |
186 | bl reloc_offset | |
187 | mr r26,r3 | |
188 | addis r4,r3,KERNELBASE@h /* current address of _start */ | |
ccdcef72 DF |
189 | lis r5,PHYSICAL_START@h |
190 | cmplw 0,r4,r5 /* already running at PHYSICAL_START? */ | |
14cf11af | 191 | bne relocate_kernel |
14cf11af PM |
192 | /* |
193 | * we now have the 1st 16M of ram mapped with the bats. | |
194 | * prep needs the mmu to be turned on here, but pmac already has it on. | |
195 | * this shouldn't bother the pmac since it just gets turned on again | |
196 | * as we jump to our code at KERNELBASE. -- Cort | |
197 | * Actually no, pmac doesn't have it on any more. BootX enters with MMU | |
198 | * off, and in other cases, we now turn it off before changing BATs above. | |
199 | */ | |
200 | turn_on_mmu: | |
201 | mfmsr r0 | |
202 | ori r0,r0,MSR_DR|MSR_IR | |
203 | mtspr SPRN_SRR1,r0 | |
204 | lis r0,start_here@h | |
205 | ori r0,r0,start_here@l | |
206 | mtspr SPRN_SRR0,r0 | |
207 | SYNC | |
208 | RFI /* enables MMU */ | |
209 | ||
210 | /* | |
211 | * We need __secondary_hold as a place to hold the other cpus on | |
212 | * an SMP machine, even when we are running a UP kernel. | |
213 | */ | |
214 | . = 0xc0 /* for prep bootloader */ | |
215 | li r3,1 /* MTX only has 1 cpu */ | |
216 | .globl __secondary_hold | |
217 | __secondary_hold: | |
218 | /* tell the master we're here */ | |
bbd0abda | 219 | stw r3,__secondary_hold_acknowledge@l(0) |
14cf11af PM |
220 | #ifdef CONFIG_SMP |
221 | 100: lwz r4,0(0) | |
222 | /* wait until we're told to start */ | |
223 | cmpw 0,r4,r3 | |
224 | bne 100b | |
225 | /* our cpu # was at addr 0 - go */ | |
226 | mr r24,r3 /* cpu # */ | |
227 | b __secondary_start | |
228 | #else | |
229 | b . | |
230 | #endif /* CONFIG_SMP */ | |
231 | ||
bbd0abda PM |
232 | .globl __secondary_hold_spinloop |
233 | __secondary_hold_spinloop: | |
234 | .long 0 | |
235 | .globl __secondary_hold_acknowledge | |
236 | __secondary_hold_acknowledge: | |
237 | .long -1 | |
238 | ||
14cf11af PM |
239 | /* |
240 | * Exception entry code. This code runs with address translation | |
241 | * turned off, i.e. using physical addresses. | |
242 | * We assume sprg3 has the physical address of the current | |
243 | * task's thread_struct. | |
244 | */ | |
245 | #define EXCEPTION_PROLOG \ | |
246 | mtspr SPRN_SPRG0,r10; \ | |
247 | mtspr SPRN_SPRG1,r11; \ | |
248 | mfcr r10; \ | |
249 | EXCEPTION_PROLOG_1; \ | |
250 | EXCEPTION_PROLOG_2 | |
251 | ||
252 | #define EXCEPTION_PROLOG_1 \ | |
253 | mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \ | |
254 | andi. r11,r11,MSR_PR; \ | |
255 | tophys(r11,r1); /* use tophys(r1) if kernel */ \ | |
256 | beq 1f; \ | |
257 | mfspr r11,SPRN_SPRG3; \ | |
258 | lwz r11,THREAD_INFO-THREAD(r11); \ | |
259 | addi r11,r11,THREAD_SIZE; \ | |
260 | tophys(r11,r11); \ | |
261 | 1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */ | |
262 | ||
263 | ||
264 | #define EXCEPTION_PROLOG_2 \ | |
265 | CLR_TOP32(r11); \ | |
266 | stw r10,_CCR(r11); /* save registers */ \ | |
267 | stw r12,GPR12(r11); \ | |
268 | stw r9,GPR9(r11); \ | |
269 | mfspr r10,SPRN_SPRG0; \ | |
270 | stw r10,GPR10(r11); \ | |
271 | mfspr r12,SPRN_SPRG1; \ | |
272 | stw r12,GPR11(r11); \ | |
273 | mflr r10; \ | |
274 | stw r10,_LINK(r11); \ | |
275 | mfspr r12,SPRN_SRR0; \ | |
276 | mfspr r9,SPRN_SRR1; \ | |
277 | stw r1,GPR1(r11); \ | |
278 | stw r1,0(r11); \ | |
279 | tovirt(r1,r11); /* set new kernel sp */ \ | |
280 | li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \ | |
281 | MTMSRD(r10); /* (except for mach check in rtas) */ \ | |
282 | stw r0,GPR0(r11); \ | |
ec2b36b9 BH |
283 | lis r10,STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */ \ |
284 | addi r10,r10,STACK_FRAME_REGS_MARKER@l; \ | |
f78541dc | 285 | stw r10,8(r11); \ |
14cf11af PM |
286 | SAVE_4GPRS(3, r11); \ |
287 | SAVE_2GPRS(7, r11) | |
288 | ||
289 | /* | |
290 | * Note: code which follows this uses cr0.eq (set if from kernel), | |
291 | * r11, r12 (SRR0), and r9 (SRR1). | |
292 | * | |
293 | * Note2: once we have set r1 we are in a position to take exceptions | |
294 | * again, and we could thus set MSR:RI at that point. | |
295 | */ | |
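/*
 * Rough summary of what the prolog leaves behind: r11 is the physical
 * address of a freshly allocated INT_FRAME_SIZE exception frame (on the
 * current kernel stack if we came from the kernel, at the top of the
 * task's kernel stack if we came from user mode), and r1 is its virtual
 * address.  CR, LR, r0, r1 and r3-r12 of the interrupted context are
 * saved in the frame, STACK_FRAME_REGS_MARKER is written at offset 8 so
 * backtrace code can recognise exception frames, and the MSR is switched
 * to MSR_KERNEL with translation still off (~(MSR_IR|MSR_DR)).
 */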
296 | ||
297 | /* | |
298 | * Exception vectors. | |
299 | */ | |
300 | #define EXCEPTION(n, label, hdlr, xfer) \ | |
301 | . = n; \ | |
302 | label: \ | |
303 | EXCEPTION_PROLOG; \ | |
304 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | |
305 | xfer(n, hdlr) | |
306 | ||
307 | #define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \ | |
308 | li r10,trap; \ | |
d73e0c99 | 309 | stw r10,_TRAP(r11); \ |
14cf11af PM |
310 | li r10,MSR_KERNEL; \ |
311 | copyee(r10, r9); \ | |
312 | bl tfer; \ | |
313 | i##n: \ | |
314 | .long hdlr; \ | |
315 | .long ret | |
316 | ||
317 | #define COPY_EE(d, s) rlwimi d,s,0,16,16 | |
318 | #define NOCOPY(d, s) | |
319 | ||
320 | #define EXC_XFER_STD(n, hdlr) \ | |
321 | EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \ | |
322 | ret_from_except_full) | |
323 | ||
324 | #define EXC_XFER_LITE(n, hdlr) \ | |
325 | EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \ | |
326 | ret_from_except) | |
327 | ||
328 | #define EXC_XFER_EE(n, hdlr) \ | |
329 | EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \ | |
330 | ret_from_except_full) | |
331 | ||
332 | #define EXC_XFER_EE_LITE(n, hdlr) \ | |
333 | EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \ | |
334 | ret_from_except) | |
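/*
 * The EXC_XFER_* flavours differ in two ways (a sketch of the macros above):
 * the *_EE variants copy the EE bit from the saved SRR1 into the MSR the
 * handler runs with (COPY_EE), so interrupts stay enabled only if they were
 * enabled when the exception occurred; the *_LITE variants go through
 * transfer_to_handler/ret_from_except and record trap = n+1, the low bit
 * marking a frame without the full register set.  The two .long words after
 * "bl tfer" (at label i<n>) hold the C handler and the return path, which
 * transfer_to_handler* reads via the link register.
 */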
335 | ||
336 | /* System reset */ | |
337 | /* core99 pmac starts the secondary here by changing the vector, and
dc1c1ca3 | 338 | putting it back to what it was (unknown_exception) when done. */ |
dc1c1ca3 | 339 | EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD) |
14cf11af PM |
340 | |
341 | /* Machine check */ | |
342 | /* | |
343 | * On CHRP, this is complicated by the fact that we could get a | |
344 | * machine check inside RTAS, and we have no guarantee that certain | |
345 | * critical registers will have the values we expect. The set of | |
346 | * registers that might have bad values includes all the GPRs | |
347 | * and all the BATs. We indicate that we are in RTAS by putting | |
348 | * a non-zero value, the address of the exception frame to use, | |
349 | * in SPRG2. The machine check handler checks SPRG2 and uses its | |
350 | * value if it is non-zero. If we ever needed to free up SPRG2, | |
351 | * we could use a field in the thread_info or thread_struct instead. | |
352 | * (Other exception handlers assume that r1 is a valid kernel stack | |
353 | * pointer when we take an exception from supervisor mode.) | |
354 | * -- paulus. | |
355 | */ | |
356 | . = 0x200 | |
357 | mtspr SPRN_SPRG0,r10 | |
358 | mtspr SPRN_SPRG1,r11 | |
359 | mfcr r10 | |
360 | #ifdef CONFIG_PPC_CHRP | |
361 | mfspr r11,SPRN_SPRG2 | |
362 | cmpwi 0,r11,0 | |
363 | bne 7f | |
364 | #endif /* CONFIG_PPC_CHRP */ | |
365 | EXCEPTION_PROLOG_1 | |
366 | 7: EXCEPTION_PROLOG_2 | |
367 | addi r3,r1,STACK_FRAME_OVERHEAD | |
368 | #ifdef CONFIG_PPC_CHRP | |
369 | mfspr r4,SPRN_SPRG2 | |
370 | cmpwi cr1,r4,0 | |
371 | bne cr1,1f | |
372 | #endif | |
dc1c1ca3 | 373 | EXC_XFER_STD(0x200, machine_check_exception) |
14cf11af PM |
374 | #ifdef CONFIG_PPC_CHRP |
375 | 1: b machine_check_in_rtas | |
376 | #endif | |
377 | ||
378 | /* Data access exception. */ | |
379 | . = 0x300 | |
14cf11af PM |
380 | DataAccess: |
381 | EXCEPTION_PROLOG | |
14cf11af | 382 | mfspr r10,SPRN_DSISR |
4ee7084e | 383 | stw r10,_DSISR(r11) |
14cf11af PM |
384 | andis. r0,r10,0xa470 /* weird error? */ |
385 | bne 1f /* if not, try to put a PTE */ | |
386 | mfspr r4,SPRN_DAR /* into the hash table */ | |
387 | rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */ | |
388 | bl hash_page | |
4ee7084e | 389 | 1: lwz r5,_DSISR(r11) /* get DSISR value */ |
14cf11af PM |
390 | mfspr r4,SPRN_DAR |
391 | EXC_XFER_EE_LITE(0x300, handle_page_fault) | |
392 | ||
14cf11af PM |
393 | |
394 | /* Instruction access exception. */ | |
395 | . = 0x400 | |
14cf11af PM |
396 | InstructionAccess: |
397 | EXCEPTION_PROLOG | |
14cf11af PM |
398 | andis. r0,r9,0x4000 /* no pte found? */ |
399 | beq 1f /* if so, try to put a PTE */ | |
400 | li r3,0 /* into the hash table */ | |
401 | mr r4,r12 /* SRR0 is fault address */ | |
402 | bl hash_page | |
403 | 1: mr r4,r12 | |
404 | mr r5,r9 | |
405 | EXC_XFER_EE_LITE(0x400, handle_page_fault) | |
406 | ||
14cf11af PM |
407 | /* External interrupt */ |
408 | EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) | |
409 | ||
410 | /* Alignment exception */ | |
411 | . = 0x600 | |
412 | Alignment: | |
413 | EXCEPTION_PROLOG | |
414 | mfspr r4,SPRN_DAR | |
415 | stw r4,_DAR(r11) | |
416 | mfspr r5,SPRN_DSISR | |
417 | stw r5,_DSISR(r11) | |
418 | addi r3,r1,STACK_FRAME_OVERHEAD | |
dc1c1ca3 | 419 | EXC_XFER_EE(0x600, alignment_exception) |
14cf11af PM |
420 | |
421 | /* Program check exception */ | |
dc1c1ca3 | 422 | EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD) |
14cf11af PM |
423 | |
424 | /* Floating-point unavailable */ | |
425 | . = 0x800 | |
426 | FPUnavailable: | |
aa42c69c KP |
427 | BEGIN_FTR_SECTION |
428 | /* | |
429 | * Certain Freescale cores don't have a FPU and treat fp instructions | |
430 | * as a FP Unavailable exception. Redirect to illegal/emulation handling. | |
431 | */ | |
432 | b ProgramCheck | |
433 | END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE) | |
14cf11af | 434 | EXCEPTION_PROLOG |
6f3d8e69 MN |
435 | beq 1f |
436 | bl load_up_fpu /* if from user, just load it up */ | |
437 | b fast_exception_return | |
438 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | |
8dad3f92 | 439 | EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception) |
14cf11af PM |
440 | |
441 | /* Decrementer */ | |
442 | EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) | |
443 | ||
dc1c1ca3 SR |
444 | EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE) |
445 | EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE) | |
14cf11af PM |
446 | |
447 | /* System call */ | |
448 | . = 0xc00 | |
449 | SystemCall: | |
450 | EXCEPTION_PROLOG | |
451 | EXC_XFER_EE_LITE(0xc00, DoSyscall) | |
452 | ||
453 | /* Single step - not used on 601 */ | |
dc1c1ca3 SR |
454 | EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD) |
455 | EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE) | |
14cf11af PM |
456 | |
457 | /* | |
458 | * The Altivec unavailable trap is at 0x0f20. Foo. | |
459 | * We effectively remap it to 0x3000. | |
460 | * We include an altivec unavailable exception vector even if | |
461 | * not configured for Altivec, so that you can't panic a | |
462 | * non-altivec kernel running on a machine with altivec just | |
463 | * by executing an altivec instruction. | |
464 | */ | |
465 | . = 0xf00 | |
555d97ac | 466 | b PerformanceMonitor |
14cf11af PM |
467 | |
468 | . = 0xf20 | |
469 | b AltiVecUnavailable | |
470 | ||
14cf11af PM |
471 | /* |
472 | * Handle TLB miss for instruction on 603/603e. | |
473 | * Note: we get an alternate set of r0 - r3 to use automatically. | |
474 | */ | |
475 | . = 0x1000 | |
476 | InstructionTLBMiss: | |
477 | /* | |
478 | * r0: stored ctr | |
479 | * r1: linux style pte ( later becomes ppc hardware pte ) | |
480 | * r2: ptr to linux-style pte | |
481 | * r3: scratch | |
482 | */ | |
483 | mfctr r0 | |
484 | /* Get PTE (linux-style) and check access */ | |
485 | mfspr r3,SPRN_IMISS | |
8a13c4f9 KG |
486 | lis r1,PAGE_OFFSET@h /* check if kernel address */ |
487 | cmplw 0,r1,r3 | |
14cf11af PM |
488 | mfspr r2,SPRN_SPRG3 |
489 | li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ | |
490 | lwz r2,PGDIR(r2) | |
8a13c4f9 | 491 | bge- 112f |
bde6c6e1 SW |
492 | mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ |
493 | rlwimi r1,r2,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ | |
14cf11af PM |
494 | lis r2,swapper_pg_dir@ha /* if kernel address, use */ |
495 | addi r2,r2,swapper_pg_dir@l /* kernel page table */ | |
14cf11af PM |
496 | 112: tophys(r2,r2) |
497 | rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ | |
498 | lwz r2,0(r2) /* get pmd entry */ | |
499 | rlwinm. r2,r2,0,0,19 /* extract address of pte page */ | |
500 | beq- InstructionAddressInvalid /* return if no mapping */ | |
501 | rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ | |
502 | lwz r3,0(r2) /* get linux-style pte */ | |
503 | andc. r1,r1,r3 /* check access & ~permission */ | |
504 | bne- InstructionAddressInvalid /* return if access not permitted */ | |
505 | ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ | |
506 | /* | |
507 | * NOTE! We are assuming this is not an SMP system, otherwise | |
508 | * we would need to update the pte atomically with lwarx/stwcx. | |
509 | */ | |
510 | stw r3,0(r2) /* update PTE (accessed bit) */ | |
511 | /* Convert linux-style PTE to low word of PPC-style PTE */ | |
512 | rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ | |
513 | rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ | |
514 | and r1,r1,r2 /* writable if _RW and _DIRTY */ | |
515 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | |
516 | rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ | |
a4bd6a93 | 517 | ori r1,r1,0xe04 /* clear out reserved bits */ |
14cf11af | 518 | andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ |
345953cf KG |
519 | BEGIN_FTR_SECTION |
520 | rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */ | |
521 | END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) | |
14cf11af PM |
522 | mtspr SPRN_RPA,r1 |
523 | mfspr r3,SPRN_IMISS | |
524 | tlbli r3 | |
525 | mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ | |
526 | mtcrf 0x80,r3 | |
527 | rfi | |
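/*
 * For reference, the PP conversion above gives:
 *	kernel page			PP = 00	(supervisor r/w, no user access)
 *	user page, not both RW+DIRTY	PP = 11	(read-only)
 *	user page, RW and DIRTY		PP = 10	(read/write)
 * DataLoadTLBMiss below does the same; DataStoreTLBMiss only ever maps
 * writable pages, so it just uses PP = 10 for user and 00 for kernel.
 */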
528 | InstructionAddressInvalid: | |
529 | mfspr r3,SPRN_SRR1 | |
530 | rlwinm r1,r3,9,6,6 /* Get load/store bit */ | |
531 | ||
532 | addis r1,r1,0x2000 | |
533 | mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */ | |
534 | mtctr r0 /* Restore CTR */ | |
535 | andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ | |
536 | or r2,r2,r1 | |
537 | mtspr SPRN_SRR1,r2 | |
538 | mfspr r1,SPRN_IMISS /* Get failing address */ | |
539 | rlwinm. r2,r2,0,31,31 /* Check for little endian access */ | |
540 | rlwimi r2,r2,1,30,30 /* change 1 -> 3 */ | |
541 | xor r1,r1,r2 | |
542 | mtspr SPRN_DAR,r1 /* Set fault address */ | |
543 | mfmsr r0 /* Restore "normal" registers */ | |
544 | xoris r0,r0,MSR_TGPR>>16 | |
545 | mtcrf 0x80,r3 /* Restore CR0 */ | |
546 | mtmsr r0 | |
547 | b InstructionAccess | |
548 | ||
549 | /* | |
550 | * Handle TLB miss for DATA Load operation on 603/603e | |
551 | */ | |
552 | . = 0x1100 | |
553 | DataLoadTLBMiss: | |
554 | /* | |
555 | * r0: stored ctr | |
556 | * r1: linux style pte ( later becomes ppc hardware pte ) | |
557 | * r2: ptr to linux-style pte | |
558 | * r3: scratch | |
559 | */ | |
560 | mfctr r0 | |
561 | /* Get PTE (linux-style) and check access */ | |
562 | mfspr r3,SPRN_DMISS | |
8a13c4f9 KG |
563 | lis r1,PAGE_OFFSET@h /* check if kernel address */ |
564 | cmplw 0,r1,r3 | |
14cf11af PM |
565 | mfspr r2,SPRN_SPRG3 |
566 | li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ | |
567 | lwz r2,PGDIR(r2) | |
8a13c4f9 | 568 | bge- 112f |
bde6c6e1 SW |
569 | mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ |
570 | rlwimi r1,r2,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ | |
14cf11af PM |
571 | lis r2,swapper_pg_dir@ha /* if kernel address, use */ |
572 | addi r2,r2,swapper_pg_dir@l /* kernel page table */ | |
14cf11af PM |
573 | 112: tophys(r2,r2) |
574 | rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ | |
575 | lwz r2,0(r2) /* get pmd entry */ | |
576 | rlwinm. r2,r2,0,0,19 /* extract address of pte page */ | |
577 | beq- DataAddressInvalid /* return if no mapping */ | |
578 | rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ | |
579 | lwz r3,0(r2) /* get linux-style pte */ | |
580 | andc. r1,r1,r3 /* check access & ~permission */ | |
581 | bne- DataAddressInvalid /* return if access not permitted */ | |
582 | ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ | |
583 | /* | |
584 | * NOTE! We are assuming this is not an SMP system, otherwise | |
585 | * we would need to update the pte atomically with lwarx/stwcx. | |
586 | */ | |
587 | stw r3,0(r2) /* update PTE (accessed bit) */ | |
588 | /* Convert linux-style PTE to low word of PPC-style PTE */ | |
589 | rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ | |
590 | rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ | |
591 | and r1,r1,r2 /* writable if _RW and _DIRTY */ | |
592 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | |
593 | rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ | |
a4bd6a93 | 594 | ori r1,r1,0xe04 /* clear out reserved bits */ |
14cf11af | 595 | andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ |
345953cf KG |
596 | BEGIN_FTR_SECTION |
597 | rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */ | |
598 | END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) | |
14cf11af PM |
599 | mtspr SPRN_RPA,r1 |
600 | mfspr r3,SPRN_DMISS | |
601 | tlbld r3 | |
602 | mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ | |
603 | mtcrf 0x80,r3 | |
604 | rfi | |
605 | DataAddressInvalid: | |
606 | mfspr r3,SPRN_SRR1 | |
607 | rlwinm r1,r3,9,6,6 /* Get load/store bit */ | |
608 | addis r1,r1,0x2000 | |
609 | mtspr SPRN_DSISR,r1 | |
610 | mtctr r0 /* Restore CTR */ | |
611 | andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ | |
612 | mtspr SPRN_SRR1,r2 | |
613 | mfspr r1,SPRN_DMISS /* Get failing address */ | |
614 | rlwinm. r2,r2,0,31,31 /* Check for little endian access */ | |
615 | beq 20f /* Jump if big endian */ | |
616 | xori r1,r1,3 | |
617 | 20: mtspr SPRN_DAR,r1 /* Set fault address */ | |
618 | mfmsr r0 /* Restore "normal" registers */ | |
619 | xoris r0,r0,MSR_TGPR>>16 | |
620 | mtcrf 0x80,r3 /* Restore CR0 */ | |
621 | mtmsr r0 | |
622 | b DataAccess | |
623 | ||
624 | /* | |
625 | * Handle TLB miss for DATA Store on 603/603e | |
626 | */ | |
627 | . = 0x1200 | |
628 | DataStoreTLBMiss: | |
629 | /* | |
630 | * r0: stored ctr | |
631 | * r1: linux style pte ( later becomes ppc hardware pte ) | |
632 | * r2: ptr to linux-style pte | |
633 | * r3: scratch | |
634 | */ | |
635 | mfctr r0 | |
636 | /* Get PTE (linux-style) and check access */ | |
637 | mfspr r3,SPRN_DMISS | |
8a13c4f9 KG |
638 | lis r1,PAGE_OFFSET@h /* check if kernel address */ |
639 | cmplw 0,r1,r3 | |
14cf11af PM |
640 | mfspr r2,SPRN_SPRG3 |
641 | li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */ | |
642 | lwz r2,PGDIR(r2) | |
8a13c4f9 | 643 | bge- 112f |
bde6c6e1 SW |
644 | mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ |
645 | rlwimi r1,r2,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ | |
14cf11af PM |
646 | lis r2,swapper_pg_dir@ha /* if kernel address, use */ |
647 | addi r2,r2,swapper_pg_dir@l /* kernel page table */ | |
14cf11af PM |
648 | 112: tophys(r2,r2) |
649 | rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ | |
650 | lwz r2,0(r2) /* get pmd entry */ | |
651 | rlwinm. r2,r2,0,0,19 /* extract address of pte page */ | |
652 | beq- DataAddressInvalid /* return if no mapping */ | |
653 | rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ | |
654 | lwz r3,0(r2) /* get linux-style pte */ | |
655 | andc. r1,r1,r3 /* check access & ~permission */ | |
656 | bne- DataAddressInvalid /* return if access not permitted */ | |
657 | ori r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY | |
658 | /* | |
659 | * NOTE! We are assuming this is not an SMP system, otherwise | |
660 | * we would need to update the pte atomically with lwarx/stwcx. | |
661 | */ | |
662 | stw r3,0(r2) /* update PTE (accessed/dirty bits) */ | |
663 | /* Convert linux-style PTE to low word of PPC-style PTE */ | |
664 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | |
a4bd6a93 | 665 | li r1,0xe05 /* clear out reserved bits & PP lsb */ |
14cf11af | 666 | andc r1,r3,r1 /* PP = user? 2: 0 */ |
345953cf KG |
667 | BEGIN_FTR_SECTION |
668 | rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */ | |
669 | END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) | |
14cf11af PM |
670 | mtspr SPRN_RPA,r1 |
671 | mfspr r3,SPRN_DMISS | |
672 | tlbld r3 | |
673 | mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ | |
674 | mtcrf 0x80,r3 | |
675 | rfi | |
676 | ||
677 | #ifndef CONFIG_ALTIVEC | |
dc1c1ca3 | 678 | #define altivec_assist_exception unknown_exception |
14cf11af PM |
679 | #endif |
680 | ||
dc1c1ca3 | 681 | EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE) |
14cf11af | 682 | EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE) |
dc1c1ca3 | 683 | EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE) |
dc1c1ca3 | 684 | EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE) |
14cf11af | 685 | EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD) |
dc1c1ca3 | 686 | EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE) |
dc1c1ca3 SR |
687 | EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE) |
688 | EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE) | |
689 | EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE) | |
690 | EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE) | |
691 | EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE) | |
692 | EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE) | |
693 | EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE) | |
14cf11af | 694 | EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE) |
dc1c1ca3 SR |
695 | EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE) |
696 | EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE) | |
697 | EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE) | |
698 | EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE) | |
699 | EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE) | |
700 | EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE) | |
701 | EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE) | |
702 | EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE) | |
703 | EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE) | |
704 | EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE) | |
705 | EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE) | |
706 | EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE) | |
707 | EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE) | |
708 | EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE) | |
709 | EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE) | |
14cf11af PM |
710 | |
711 | .globl mol_trampoline | |
712 | .set mol_trampoline, i0x2f00 | |
713 | ||
714 | . = 0x3000 | |
715 | ||
716 | AltiVecUnavailable: | |
717 | EXCEPTION_PROLOG | |
718 | #ifdef CONFIG_ALTIVEC | |
719 | bne load_up_altivec /* if from user, just load it up */ | |
720 | #endif /* CONFIG_ALTIVEC */ | |
f1434a48 | 721 | addi r3,r1,STACK_FRAME_OVERHEAD |
dc1c1ca3 | 722 | EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception) |
14cf11af | 723 | |
555d97ac AF |
724 | PerformanceMonitor: |
725 | EXCEPTION_PROLOG | |
726 | addi r3,r1,STACK_FRAME_OVERHEAD | |
727 | EXC_XFER_STD(0xf00, performance_monitor_exception) | |
728 | ||
14cf11af PM |
729 | #ifdef CONFIG_ALTIVEC |
730 | /* Note that the AltiVec support is closely modeled after the FP | |
731 | * support. Changes to one are likely to be applicable to the | |
732 | * other! */ | |
733 | load_up_altivec: | |
734 | /* | |
735 | * Disable AltiVec for the task which had AltiVec previously, | |
736 | * and save its AltiVec registers in its thread_struct. | |
737 | * Enables AltiVec for use in the kernel on return. | |
738 | * On SMP we know the AltiVec units are free, since we give it up every | |
739 | * switch. -- Kumar | |
740 | */ | |
741 | mfmsr r5 | |
742 | oris r5,r5,MSR_VEC@h | |
743 | MTMSRD(r5) /* enable use of AltiVec now */ | |
744 | isync | |
745 | /* | |
746 | * For SMP, we don't do lazy AltiVec switching because it just gets too | |
747 | * horrendously complex, especially when a task switches from one CPU | |
748 | * to another. Instead we call giveup_altivec in switch_to. | |
749 | */ | |
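/*
 * On UP, last_task_used_altivec tracks whose vector state is live in the
 * registers: the block below saves that task's VRs and VSCR into its
 * thread_struct and clears MSR_VEC in its saved MSR, so the state is only
 * spilled when some other task actually uses AltiVec.
 */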
750 | #ifndef CONFIG_SMP | |
751 | tophys(r6,0) | |
752 | addis r3,r6,last_task_used_altivec@ha | |
753 | lwz r4,last_task_used_altivec@l(r3) | |
754 | cmpwi 0,r4,0 | |
755 | beq 1f | |
756 | add r4,r4,r6 | |
757 | addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */ | |
758 | SAVE_32VRS(0,r10,r4) | |
759 | mfvscr vr0 | |
760 | li r10,THREAD_VSCR | |
761 | stvx vr0,r10,r4 | |
762 | lwz r5,PT_REGS(r4) | |
763 | add r5,r5,r6 | |
764 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
765 | lis r10,MSR_VEC@h | |
766 | andc r4,r4,r10 /* disable altivec for previous task */ | |
767 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
768 | 1: | |
769 | #endif /* CONFIG_SMP */ | |
770 | /* enable use of AltiVec after return */ | |
771 | oris r9,r9,MSR_VEC@h | |
772 | mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ | |
773 | li r4,1 | |
774 | li r10,THREAD_VSCR | |
775 | stw r4,THREAD_USED_VR(r5) | |
776 | lvx vr0,r10,r5 | |
777 | mtvscr vr0 | |
778 | REST_32VRS(0,r10,r5) | |
779 | #ifndef CONFIG_SMP | |
780 | subi r4,r5,THREAD | |
781 | sub r4,r4,r6 | |
782 | stw r4,last_task_used_altivec@l(r3) | |
783 | #endif /* CONFIG_SMP */ | |
784 | /* restore registers and return */ | |
785 | /* we haven't used ctr or xer or lr */ | |
786 | b fast_exception_return | |
787 | ||
14cf11af PM |
788 | /* |
789 | * giveup_altivec(tsk) | |
790 | * Disable AltiVec for the task given as the argument, | |
791 | * and save the AltiVec registers in its thread_struct. | |
792 | * Enables AltiVec for use in the kernel on return. | |
793 | */ | |
794 | ||
795 | .globl giveup_altivec | |
796 | giveup_altivec: | |
797 | mfmsr r5 | |
798 | oris r5,r5,MSR_VEC@h | |
799 | SYNC | |
800 | MTMSRD(r5) /* enable use of AltiVec now */ | |
801 | isync | |
802 | cmpwi 0,r3,0 | |
803 | beqlr- /* if no previous owner, done */ | |
804 | addi r3,r3,THREAD /* want THREAD of task */ | |
805 | lwz r5,PT_REGS(r3) | |
806 | cmpwi 0,r5,0 | |
807 | SAVE_32VRS(0, r4, r3) | |
808 | mfvscr vr0 | |
809 | li r4,THREAD_VSCR | |
810 | stvx vr0,r4,r3 | |
811 | beq 1f | |
812 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
813 | lis r3,MSR_VEC@h | |
814 | andc r4,r4,r3 /* disable AltiVec for previous task */ | |
815 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
816 | 1: | |
817 | #ifndef CONFIG_SMP | |
818 | li r5,0 | |
819 | lis r4,last_task_used_altivec@ha | |
820 | stw r5,last_task_used_altivec@l(r4) | |
821 | #endif /* CONFIG_SMP */ | |
822 | blr | |
823 | #endif /* CONFIG_ALTIVEC */ | |
824 | ||
825 | /* | |
826 | * This code is jumped to from the startup code to copy | |
ccdcef72 | 827 | * the kernel image to physical address PHYSICAL_START. |
14cf11af PM |
828 | */ |
829 | relocate_kernel: | |
830 | addis r9,r26,klimit@ha /* fetch klimit */ | |
831 | lwz r25,klimit@l(r9) | |
832 | addis r25,r25,-KERNELBASE@h | |
ccdcef72 | 833 | lis r3,PHYSICAL_START@h /* Destination base address */ |
14cf11af PM |
834 | li r6,0 /* Destination offset */ |
835 | li r5,0x4000 /* # bytes of memory to copy */ | |
836 | bl copy_and_flush /* copy the first 0x4000 bytes */ | |
837 | addi r0,r3,4f@l /* jump to the address of 4f */ | |
838 | mtctr r0 /* in copy and do the rest. */ | |
839 | bctr /* jump to the copy */ | |
840 | 4: mr r5,r25 | |
841 | bl copy_and_flush /* copy the rest */ | |
842 | b turn_on_mmu | |
843 | ||
844 | /* | |
845 | * Copy routine used to copy the kernel to start at physical address 0 | |
846 | * and flush and invalidate the caches as needed. | |
847 | * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset | |
848 | * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. | |
849 | */ | |
748a7683 | 850 | _ENTRY(copy_and_flush) |
14cf11af PM |
851 | addi r5,r5,-4 |
852 | addi r6,r6,-4 | |
7dffb720 | 853 | 4: li r0,L1_CACHE_BYTES/4 |
14cf11af PM |
854 | mtctr r0 |
855 | 3: addi r6,r6,4 /* copy a cache line */ | |
856 | lwzx r0,r6,r4 | |
857 | stwx r0,r6,r3 | |
858 | bdnz 3b | |
859 | dcbst r6,r3 /* write it to memory */ | |
860 | sync | |
861 | icbi r6,r3 /* flush the icache line */ | |
862 | cmplw 0,r6,r5 | |
863 | blt 4b | |
864 | sync /* additional sync needed on g4 */ | |
865 | isync | |
866 | addi r5,r5,4 | |
867 | addi r6,r6,4 | |
868 | blr | |
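/*
 * For reference: the loop above copies one cache line (L1_CACHE_BYTES) at a
 * time from r4+r6 to r3+r6, then does dcbst (push the destination line to
 * memory) and icbi (invalidate the matching icache line), so the relocated
 * image is coherent with instruction fetch before we branch into it.
 */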
869 | ||
14cf11af PM |
870 | #ifdef CONFIG_SMP |
871 | #ifdef CONFIG_GEMINI | |
872 | .globl __secondary_start_gemini | |
873 | __secondary_start_gemini: | |
874 | mfspr r4,SPRN_HID0 | |
875 | ori r4,r4,HID0_ICFI | |
876 | li r3,0 | |
877 | ori r3,r3,HID0_ICE | |
878 | andc r4,r4,r3 | |
879 | mtspr SPRN_HID0,r4 | |
880 | sync | |
881 | b __secondary_start | |
882 | #endif /* CONFIG_GEMINI */ | |
883 | ||
ee0339f2 JL |
884 | .globl __secondary_start_mpc86xx |
885 | __secondary_start_mpc86xx: | |
886 | mfspr r3, SPRN_PIR | |
887 | stw r3, __secondary_hold_acknowledge@l(0) | |
888 | mr r24, r3 /* cpu # */ | |
889 | b __secondary_start | |
890 | ||
14cf11af PM |
891 | .globl __secondary_start_pmac_0 |
892 | __secondary_start_pmac_0: | |
893 | /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */ | |
894 | li r24,0 | |
895 | b 1f | |
896 | li r24,1 | |
897 | b 1f | |
898 | li r24,2 | |
899 | b 1f | |
900 | li r24,3 | |
901 | 1: | |
902 | /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0 | |
903 | set to map the 0xf0000000 - 0xffffffff region */ | |
904 | mfmsr r0 | |
905 | rlwinm r0,r0,0,28,26 /* clear DR (0x10) */ | |
906 | SYNC | |
907 | mtmsr r0 | |
908 | isync | |
909 | ||
910 | .globl __secondary_start | |
911 | __secondary_start: | |
14cf11af PM |
912 | /* Copy some CPU settings from CPU 0 */ |
913 | bl __restore_cpu_setup | |
914 | ||
915 | lis r3,-KERNELBASE@h | |
916 | mr r4,r24 | |
14cf11af PM |
917 | bl call_setup_cpu /* Call setup_cpu for this CPU */ |
918 | #ifdef CONFIG_6xx | |
919 | lis r3,-KERNELBASE@h | |
920 | bl init_idle_6xx | |
921 | #endif /* CONFIG_6xx */ | |
14cf11af PM |
922 | |
923 | /* get current_thread_info and current */ | |
924 | lis r1,secondary_ti@ha | |
925 | tophys(r1,r1) | |
926 | lwz r1,secondary_ti@l(r1) | |
927 | tophys(r2,r1) | |
928 | lwz r2,TI_TASK(r2) | |
929 | ||
930 | /* stack */ | |
931 | addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD | |
932 | li r0,0 | |
933 | tophys(r3,r1) | |
934 | stw r0,0(r3) | |
935 | ||
936 | /* load up the MMU */ | |
937 | bl load_up_mmu | |
938 | ||
939 | /* ptr to phys current thread */ | |
940 | tophys(r4,r2) | |
941 | addi r4,r4,THREAD /* phys address of our thread_struct */ | |
942 | CLR_TOP32(r4) | |
943 | mtspr SPRN_SPRG3,r4 | |
944 | li r3,0 | |
945 | mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */ | |
946 | ||
947 | /* enable MMU and jump to start_secondary */ | |
948 | li r4,MSR_KERNEL | |
949 | FIX_SRR1(r4,r5) | |
950 | lis r3,start_secondary@h | |
951 | ori r3,r3,start_secondary@l | |
952 | mtspr SPRN_SRR0,r3 | |
953 | mtspr SPRN_SRR1,r4 | |
954 | SYNC | |
955 | RFI | |
956 | #endif /* CONFIG_SMP */ | |
957 | ||
958 | /* | |
959 | * These generic dummy functions are kept for CPUs not
960 | * included in CONFIG_6xx | |
961 | */ | |
187a0067 | 962 | #if !defined(CONFIG_6xx) |
748a7683 | 963 | _ENTRY(__save_cpu_setup) |
14cf11af | 964 | blr |
748a7683 | 965 | _ENTRY(__restore_cpu_setup) |
14cf11af | 966 | blr |
187a0067 | 967 | #endif /* !defined(CONFIG_6xx) */ |
14cf11af PM |
968 | |
969 | ||
970 | /* | |
971 | * Load the hash table pointer (SDR1), segment registers and BAT
972 | * registers into the MMU. Intended to be called with IR=0 and DR=0.
973 | */ | |
974 | load_up_mmu: | |
975 | sync /* Force all PTE updates to finish */ | |
976 | isync | |
977 | tlbia /* Clear all TLB entries */ | |
978 | sync /* wait for tlbia/tlbie to finish */ | |
979 | TLBSYNC /* ... on all CPUs */ | |
980 | /* Load the SDR1 register (hash table base & size) */ | |
981 | lis r6,_SDR1@ha | |
982 | tophys(r6,r6) | |
983 | lwz r6,_SDR1@l(r6) | |
984 | mtspr SPRN_SDR1,r6 | |
14cf11af PM |
985 | li r0,16 /* load up segment register values */ |
986 | mtctr r0 /* for context 0 */ | |
987 | lis r3,0x2000 /* Ku = 1, VSID = 0 */ | |
988 | li r4,0 | |
989 | 3: mtsrin r3,r4 | |
990 | addi r3,r3,0x111 /* increment VSID */ | |
991 | addis r4,r4,0x1000 /* address of next segment */ | |
992 | bdnz 3b | |
187a0067 | 993 | |
14cf11af PM |
994 | /* Load the BAT registers with the values set up by MMU_init. |
995 | MMU_init takes care of whether we're on a 601 or not. */ | |
996 | mfpvr r3 | |
997 | srwi r3,r3,16 | |
998 | cmpwi r3,1 | |
999 | lis r3,BATS@ha | |
1000 | addi r3,r3,BATS@l | |
1001 | tophys(r3,r3) | |
1002 | LOAD_BAT(0,r3,r4,r5) | |
1003 | LOAD_BAT(1,r3,r4,r5) | |
1004 | LOAD_BAT(2,r3,r4,r5) | |
1005 | LOAD_BAT(3,r3,r4,r5) | |
7c03d653 | 1006 | BEGIN_MMU_FTR_SECTION |
ee0339f2 JL |
1007 | LOAD_BAT(4,r3,r4,r5) |
1008 | LOAD_BAT(5,r3,r4,r5) | |
1009 | LOAD_BAT(6,r3,r4,r5) | |
1010 | LOAD_BAT(7,r3,r4,r5) | |
7c03d653 | 1011 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) |
14cf11af PM |
1012 | blr |
1013 | ||
1014 | /* | |
1015 | * This is where the main kernel code starts. | |
1016 | */ | |
1017 | start_here: | |
1018 | /* ptr to current */ | |
1019 | lis r2,init_task@h | |
1020 | ori r2,r2,init_task@l | |
1021 | /* Set up for using our exception vectors */ | |
1022 | /* ptr to phys current thread */ | |
1023 | tophys(r4,r2) | |
1024 | addi r4,r4,THREAD /* init task's THREAD */ | |
1025 | CLR_TOP32(r4) | |
1026 | mtspr SPRN_SPRG3,r4 | |
1027 | li r3,0 | |
1028 | mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */ | |
1029 | ||
1030 | /* stack */ | |
1031 | lis r1,init_thread_union@ha | |
1032 | addi r1,r1,init_thread_union@l | |
1033 | li r0,0 | |
1034 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) | |
1035 | /* | |
187a0067 | 1036 | * Do early platform-specific initialization, |
14cf11af PM |
1037 | * and set up the MMU. |
1038 | */ | |
1039 | mr r3,r31 | |
1040 | mr r4,r30 | |
14cf11af | 1041 | bl machine_init |
22c841c9 | 1042 | bl __save_cpu_setup |
14cf11af PM |
1043 | bl MMU_init |
1044 | ||
14cf11af PM |
1045 | /* |
1046 | * Go back to running unmapped so we can load up new values | |
1047 | * for SDR1 (hash table pointer) and the segment registers | |
1048 | * and change to using our exception vectors. | |
1049 | */ | |
1050 | lis r4,2f@h | |
1051 | ori r4,r4,2f@l | |
1052 | tophys(r4,r4) | |
1053 | li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) | |
1054 | FIX_SRR1(r3,r5) | |
1055 | mtspr SPRN_SRR0,r4 | |
1056 | mtspr SPRN_SRR1,r3 | |
1057 | SYNC | |
1058 | RFI | |
1059 | /* Load up the kernel context */ | |
1060 | 2: bl load_up_mmu | |
1061 | ||
1062 | #ifdef CONFIG_BDI_SWITCH | |
1063 | /* Add helper information for the Abatron bdiGDB debugger. | |
1064 | * We do this here because we know the mmu is disabled, and | |
1065 | * will be enabled for real in just a few instructions. | |
1066 | */ | |
1067 | lis r5, abatron_pteptrs@h | |
1068 | ori r5, r5, abatron_pteptrs@l | |
1069 | stw r5, 0xf0(r0) /* This must match your Abatron config */
1070 | lis r6, swapper_pg_dir@h | |
1071 | ori r6, r6, swapper_pg_dir@l | |
1072 | tophys(r5, r5) | |
1073 | stw r6, 0(r5) | |
1074 | #endif /* CONFIG_BDI_SWITCH */ | |
1075 | ||
1076 | /* Now turn on the MMU for real! */ | |
1077 | li r4,MSR_KERNEL | |
1078 | FIX_SRR1(r4,r5) | |
1079 | lis r3,start_kernel@h | |
1080 | ori r3,r3,start_kernel@l | |
1081 | mtspr SPRN_SRR0,r3 | |
1082 | mtspr SPRN_SRR1,r4 | |
1083 | SYNC | |
1084 | RFI | |
1085 | ||
1086 | /* | |
5e696617 BH |
1087 | * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next); |
1088 | * | |
14cf11af PM |
1089 | * Set up the segment registers for a new context. |
1090 | */ | |
5e696617 BH |
1091 | _ENTRY(switch_mmu_context) |
1092 | lwz r3,MMCONTEXTID(r4) | |
1093 | cmpwi cr0,r3,0 | |
1094 | blt- 4f | |
14cf11af PM |
1095 | mulli r3,r3,897 /* multiply context by skew factor */ |
1096 | rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */ | |
1097 | addis r3,r3,0x6000 /* Set Ks, Ku bits */ | |
1098 | li r0,NUM_USER_SEGMENTS | |
1099 | mtctr r0 | |
1100 | ||
1101 | #ifdef CONFIG_BDI_SWITCH | |
1102 | /* Context switch the PTE pointer for the Abatron BDI2000. | |
1103 | * The PGDIR is passed as second argument. | |
1104 | */ | |
5e696617 | 1105 | lwz r4,MM_PGD(r4) |
14cf11af PM |
1106 | lis r5, KERNELBASE@h |
1107 | lwz r5, 0xf0(r5) | |
1108 | stw r4, 0x4(r5) | |
1109 | #endif | |
1110 | li r4,0 | |
1111 | isync | |
1112 | 3: | |
14cf11af PM |
1113 | mtsrin r3,r4 |
1114 | addi r3,r3,0x111 /* next VSID */ | |
1115 | rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */ | |
1116 | addis r4,r4,0x1000 /* address of next segment */ | |
1117 | bdnz 3b | |
1118 | sync | |
1119 | isync | |
1120 | blr | |
5e696617 BH |
1121 | 4: trap |
1122 | EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0 | |
1123 | blr | |
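/*
 * Rough picture of the loop above: each of the NUM_USER_SEGMENTS segment
 * registers (one per 256MB of user address space) is loaded with Ks/Ku set
 * and a VSID derived from the context number: context * 897 shifted into
 * the VSID field, with each successive segment adding 0x111, so different
 * contexts (and different segments within a context) are spread across the
 * hash table.
 */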
14cf11af PM |
1124 | |
1125 | /* | |
1126 | * An undocumented "feature" of 604e requires that the v bit | |
1127 | * be cleared before changing BAT values. | |
1128 | * | |
1129 | * Also, newer IBM firmware does not clear bat3 and 4 so | |
1130 | * this makes sure it's done. | |
1131 | * -- Cort | |
1132 | */ | |
1133 | clear_bats: | |
1134 | li r10,0 | |
1135 | mfspr r9,SPRN_PVR | |
1136 | rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ | |
1137 | cmpwi r9, 1 | |
1138 | beq 1f | |
1139 | ||
1140 | mtspr SPRN_DBAT0U,r10 | |
1141 | mtspr SPRN_DBAT0L,r10 | |
1142 | mtspr SPRN_DBAT1U,r10 | |
1143 | mtspr SPRN_DBAT1L,r10 | |
1144 | mtspr SPRN_DBAT2U,r10 | |
1145 | mtspr SPRN_DBAT2L,r10 | |
1146 | mtspr SPRN_DBAT3U,r10 | |
1147 | mtspr SPRN_DBAT3L,r10 | |
1148 | 1: | |
1149 | mtspr SPRN_IBAT0U,r10 | |
1150 | mtspr SPRN_IBAT0L,r10 | |
1151 | mtspr SPRN_IBAT1U,r10 | |
1152 | mtspr SPRN_IBAT1L,r10 | |
1153 | mtspr SPRN_IBAT2U,r10 | |
1154 | mtspr SPRN_IBAT2L,r10 | |
1155 | mtspr SPRN_IBAT3U,r10 | |
1156 | mtspr SPRN_IBAT3L,r10 | |
7c03d653 | 1157 | BEGIN_MMU_FTR_SECTION |
14cf11af PM |
1158 | /* Here's a tweak: at this point, CPU setup has
1159 | * not been called yet, so HIGH_BAT_EN may not be | |
1160 | * set in HID0 for the 745x processors. However, it | |
1161 | * seems that doesn't affect our ability to actually | |
1162 | * write to these SPRs. | |
1163 | */ | |
1164 | mtspr SPRN_DBAT4U,r10 | |
1165 | mtspr SPRN_DBAT4L,r10 | |
1166 | mtspr SPRN_DBAT5U,r10 | |
1167 | mtspr SPRN_DBAT5L,r10 | |
1168 | mtspr SPRN_DBAT6U,r10 | |
1169 | mtspr SPRN_DBAT6L,r10 | |
1170 | mtspr SPRN_DBAT7U,r10 | |
1171 | mtspr SPRN_DBAT7L,r10 | |
1172 | mtspr SPRN_IBAT4U,r10 | |
1173 | mtspr SPRN_IBAT4L,r10 | |
1174 | mtspr SPRN_IBAT5U,r10 | |
1175 | mtspr SPRN_IBAT5L,r10 | |
1176 | mtspr SPRN_IBAT6U,r10 | |
1177 | mtspr SPRN_IBAT6L,r10 | |
1178 | mtspr SPRN_IBAT7U,r10 | |
1179 | mtspr SPRN_IBAT7L,r10 | |
7c03d653 | 1180 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) |
14cf11af PM |
1181 | blr |
1182 | ||
1183 | flush_tlbs: | |
1184 | lis r10, 0x40 | |
1185 | 1: addic. r10, r10, -0x1000 | |
1186 | tlbie r10 | |
9acd57ca | 1187 | bgt 1b |
14cf11af PM |
1188 | sync |
1189 | blr | |
1190 | ||
1191 | mmu_off: | |
1192 | addi r4, r3, __after_mmu_off - _start | |
1193 | mfmsr r3 | |
1194 | andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */ | |
1195 | beqlr | |
1196 | andc r3,r3,r0 | |
1197 | mtspr SPRN_SRR0,r4 | |
1198 | mtspr SPRN_SRR1,r3 | |
1199 | sync | |
1200 | RFI | |
1201 | ||
14cf11af PM |
1202 | /* |
1203 | * Use the first pair of BAT registers to map the 1st 16MB | |
ccdcef72 | 1204 | * of RAM to PAGE_OFFSET. From this point on we can't safely |
14cf11af PM |
1205 | * call OF any more. |
1206 | */ | |
1207 | initial_bats: | |
ccdcef72 | 1208 | lis r11,PAGE_OFFSET@h |
14cf11af PM |
1209 | mfspr r9,SPRN_PVR |
1210 | rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ | |
1211 | cmpwi 0,r9,1 | |
1212 | bne 4f | |
1213 | ori r11,r11,4 /* set up BAT registers for 601 */ | |
1214 | li r8,0x7f /* valid, block length = 8MB */ | |
1215 | oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */ | |
1216 | oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */ | |
1217 | mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */ | |
1218 | mtspr SPRN_IBAT0L,r8 /* lower BAT register */ | |
1219 | mtspr SPRN_IBAT1U,r9 | |
1220 | mtspr SPRN_IBAT1L,r10 | |
1221 | isync | |
1222 | blr | |
14cf11af PM |
1223 | |
1224 | 4: tophys(r8,r11) | |
1225 | #ifdef CONFIG_SMP | |
1226 | ori r8,r8,0x12 /* R/W access, M=1 */ | |
1227 | #else | |
1228 | ori r8,r8,2 /* R/W access */ | |
1229 | #endif /* CONFIG_SMP */ | |
14cf11af | 1230 | ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */ |
14cf11af | 1231 | |
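/*
 * Values set up above for the non-601 case, roughly: r8 (lower BAT word) is
 * the physical base plus PP=0b10 (read/write) and, on SMP, the M (coherence)
 * bit; r11 (upper BAT word) is PAGE_OFFSET plus the BL_256M block-length
 * field and the supervisor-valid bit, i.e. one 256MB IBAT/DBAT pair covering
 * the start of RAM.
 */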
14cf11af PM |
1232 | mtspr SPRN_DBAT0L,r8 /* N.B. 6xx (not 601) have valid */ |
1233 | mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */ | |
1234 | mtspr SPRN_IBAT0L,r8 | |
1235 | mtspr SPRN_IBAT0U,r11 | |
1236 | isync | |
1237 | blr | |
1238 | ||
14cf11af | 1239 | |
f21f49ea | 1240 | #ifdef CONFIG_BOOTX_TEXT |
51d3082f BH |
1241 | setup_disp_bat: |
1242 | /* | |
1243 | * Set up the display BAT prepared for us in prom.c
1244 | */ | |
1245 | mflr r8 | |
1246 | bl reloc_offset | |
1247 | mtlr r8 | |
1248 | addis r8,r3,disp_BAT@ha | |
1249 | addi r8,r8,disp_BAT@l | |
1250 | cmpwi cr0,r8,0 | |
1251 | beqlr | |
1252 | lwz r11,0(r8) | |
1253 | lwz r8,4(r8) | |
1254 | mfspr r9,SPRN_PVR | |
1255 | rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ | |
1256 | cmpwi 0,r9,1 | |
1257 | beq 1f | |
1258 | mtspr SPRN_DBAT3L,r8 | |
1259 | mtspr SPRN_DBAT3U,r11 | |
1260 | blr | |
1261 | 1: mtspr SPRN_IBAT3L,r8 | |
1262 | mtspr SPRN_IBAT3U,r11 | |
1263 | blr | |
f21f49ea | 1264 | #endif /* CONFIG_BOOTX_TEXT */ |
51d3082f | 1265 | |
c374e00e SW |
1266 | #ifdef CONFIG_PPC_EARLY_DEBUG_CPM |
1267 | setup_cpm_bat: | |
1268 | lis r8, 0xf000 | |
1269 | ori r8, r8, 0x002a | |
1270 | mtspr SPRN_DBAT1L, r8 | |
1271 | ||
1272 | lis r11, 0xf000 | |
1273 | ori r11, r11, (BL_1M << 2) | 2 | |
1274 | mtspr SPRN_DBAT1U, r11 | |
1275 | ||
1276 | blr | |
1277 | #endif | |
1278 | ||
14cf11af PM |
1279 | #ifdef CONFIG_8260 |
1280 | /* Jump into the system reset for the rom. | |
1281 | * We first disable the MMU, and then jump to the ROM reset address. | |
1282 | * | |
1283 | * r3 is the board info structure, r4 is the location for starting. | |
1284 | * I use this for building a small kernel that can load other kernels, | |
1285 | * rather than trying to write or rely on a rom monitor that can tftp load. | |
1286 | */ | |
1287 | .globl m8260_gorom | |
1288 | m8260_gorom: | |
1289 | mfmsr r0 | |
1290 | rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */ | |
1291 | sync | |
1292 | mtmsr r0 | |
1293 | sync | |
1294 | mfspr r11, SPRN_HID0 | |
1295 | lis r10, 0 | |
1296 | ori r10,r10,HID0_ICE|HID0_DCE | |
1297 | andc r11, r11, r10 | |
1298 | mtspr SPRN_HID0, r11 | |
1299 | isync | |
1300 | li r5, MSR_ME|MSR_RI | |
1301 | lis r6,2f@h | |
1302 | addis r6,r6,-KERNELBASE@h | |
1303 | ori r6,r6,2f@l | |
1304 | mtspr SPRN_SRR0,r6 | |
1305 | mtspr SPRN_SRR1,r5 | |
1306 | isync | |
1307 | sync | |
1308 | rfi | |
1309 | 2: | |
1310 | mtlr r4 | |
1311 | blr | |
1312 | #endif | |
1313 | ||
1314 | ||
1315 | /* | |
1316 | * We put a few things here that have to be page-aligned. | |
1317 | * This stuff goes at the beginning of the data segment, | |
1318 | * which is page-aligned. | |
1319 | */ | |
1320 | .data | |
1321 | .globl sdata | |
1322 | sdata: | |
1323 | .globl empty_zero_page | |
1324 | empty_zero_page: | |
1325 | .space 4096 | |
1326 | ||
1327 | .globl swapper_pg_dir | |
1328 | swapper_pg_dir: | |
bee86f14 | 1329 | .space PGD_TABLE_SIZE |
14cf11af | 1330 | |
14cf11af PM |
1331 | .globl intercept_table |
1332 | intercept_table: | |
1333 | .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700 | |
1334 | .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0 | |
1335 | .long 0, 0, 0, i0x1300, 0, 0, 0, 0 | |
1336 | .long 0, 0, 0, 0, 0, 0, 0, 0 | |
1337 | .long 0, 0, 0, 0, 0, 0, 0, 0 | |
1338 | .long 0, 0, 0, 0, 0, 0, 0, 0 | |
1339 | ||
1340 | /* Room for two PTE pointers, usually the kernel and current user pointers | |
1341 | * to their respective root page table. | |
1342 | */ | |
1343 | abatron_pteptrs: | |
1344 | .space 8 |