/*
 * Kernel execution entry point code.
 *
 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *	Initial PowerPC version.
 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *	Rewritten for PReP
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *	Low-level exception handlers, MMU support, and rewrite.
 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *	PowerPC 8xx modifications.
 * Copyright (c) 1998-1999 TiVo, Inc.
 *	PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *	PowerPC 403GCX/405GP modifications.
 * Copyright 2000 MontaVista Software Inc.
 *	PPC405 modifications
 *	PowerPC 403GCX/405GP modifications.
 *	Author: MontaVista Software, Inc.
 *		frank_rowand@mvista.com or source@mvista.com
 *		debbie_chu@mvista.com
 * Copyright 2002-2005 MontaVista Software, Inc.
 *	PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/synch.h>
#include "head_booke.h"


/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
	__HEAD
_ENTRY(_stext);
_ENTRY(_start);
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop
	mr	r31,r3		/* save device tree ptr */
	li	r24,0		/* CPU number */

#ifdef CONFIG_RELOCATABLE
/*
 * Relocate ourselves to the current runtime address.
 * This is called only by the Boot CPU.
 * "relocate" is called with our current runtime virtual
 * address.
 * r21 will be loaded with the physical runtime address of _stext
 */
	bl	0f			/* Get our runtime address */
0:	mflr	r21			/* Make it accessible */
	addis	r21,r21,(_stext - 0b)@ha
	addi	r21,r21,(_stext - 0b)@l	/* Get our current runtime base */

	/*
	 * We have the runtime (virtual) address of our base.
	 * We calculate our shift of offset from a 256M page.
	 * We could map the 256M page we belong to at PAGE_OFFSET and
	 * get going from there.
	 */
	lis	r4,KERNELBASE@h
	ori	r4,r4,KERNELBASE@l
	rlwinm	r6,r21,0,4,31		/* r6 = PHYS_START % 256M */
	rlwinm	r5,r4,0,4,31		/* r5 = KERNELBASE % 256M */
	subf	r3,r5,r6		/* r3 = r6 - r5 */
	add	r3,r4,r3		/* Required Virtual Address */

	bl	relocate
#endif
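
/*
 * Worked example (illustrative, not from the original source): with
 * KERNELBASE = 0xc0000000 and the kernel running at physical
 * 0x10400000, the two rlwinm instructions above keep only the low 28
 * bits (the offset within a 256M page):
 *
 *	r6 = 0x10400000 & 0x0fffffff = 0x00400000
 *	r5 = 0xc0000000 & 0x0fffffff = 0x00000000
 *	r3 = r6 - r5 + KERNELBASE    = 0xc0400000
 *
 * i.e. "relocate" is asked to run the kernel at its offset within the
 * 256M page, rebased onto KERNELBASE.
 */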

	bl	init_cpu_state

	/*
	 * This is where the main kernel code starts.
	 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD		/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init

#ifdef CONFIG_RELOCATABLE
	/*
	 * Relocatable kernel support based on processing of dynamic
	 * relocation entries.
	 *
	 * r25 will contain RPN/ERPN for the start address of memory
	 * r21 will contain the current offset of _stext
	 */
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)

	/*
	 * Compute the kernstart_addr.
	 * kernstart_addr => (r6,r8)
	 * kernstart_addr & ~0xfffffff => (r6,r7)
	 */
	rlwinm	r6,r25,0,28,31	/* ERPN. Bits 32-35 of Address */
	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
	rlwinm	r8,r21,0,4,31	/* r8 = (_stext & 0xfffffff) */
	or	r8,r7,r8	/* Compute the lower 32bit of kernstart_addr */

	/* Store kernstart_addr */
	stw	r6,0(r3)	/* higher 32bit */
	stw	r8,4(r3)	/* lower 32bit */
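
	/*
	 * Illustrative example (assumed values, not from the original
	 * source): for a 36-bit physical load address of 0x2_30000000,
	 * r25 packs the 256M-aligned base with the RPN in its top
	 * nibble and the ERPN in its bottom nibble:
	 *
	 *	r25 = 0x30000002
	 *	r6  = r25 & 0x0000000f = 0x00000002  (ERPN, bits 32-35)
	 *	r7  = r25 & 0xf0000000 = 0x30000000  (RPN, 256M-aligned)
	 *
	 * so kernstart_addr = (r6 << 32) | (r7 | (_stext & 0xfffffff)).
	 */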

	/*
	 * Compute the virt_phys_offset :
	 * virt_phys_offset = stext.run - kernstart_addr
	 *
	 * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
	 * When we relocate, we have :
	 *
	 *	(kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
	 *
	 * hence:
	 *	virt_phys_offset = (KERNELBASE & ~0xfffffff) -
	 *			   (kernstart_addr & ~0xfffffff)
	 *
	 */
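
	/*
	 * Worked example (illustrative, continuing the assumed values
	 * above): with KERNELBASE = 0xc0000000 and kernstart_addr =
	 * 0x2_30000000,
	 *
	 *	(r4,r5) = 0x0_c0000000
	 *	(r6,r7) = 0x2_30000000
	 *	virt_phys_offset = 0xfffffffe_90000000 (-0x1_70000000)
	 *
	 * computed below as a 64-bit subtraction across the register
	 * pairs: subfc produces the low word with a borrow, subfe
	 * consumes it for the high word.
	 */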

	/* KERNELBASE&~0xfffffff => (r4,r5) */
	li	r4, 0		/* higher 32bit */
	lis	r5,KERNELBASE@h
	rlwinm	r5,r5,0,0,3	/* Align to 256M, lower 32bit */

	/*
	 * 64bit subtraction.
	 */
	subfc	r5,r7,r5
	subfe	r4,r6,r4

	/* Store virt_phys_offset */
	lis	r3,virt_phys_offset@ha
	la	r3,virt_phys_offset@l(r3)

	stw	r4,0(r3)
	stw	r5,4(r3)

#elif defined(CONFIG_DYNAMIC_MEMSTART)
	/*
	 * Mapping based, page aligned dynamic kernel loading.
	 *
	 * r25 will contain RPN/ERPN for the start address of memory
	 *
	 * Add the difference between KERNELBASE and PAGE_OFFSET to the
	 * start of physical memory to get kernstart_addr.
	 */
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)

	lis	r4,KERNELBASE@h
	ori	r4,r4,KERNELBASE@l
	lis	r5,PAGE_OFFSET@h
	ori	r5,r5,PAGE_OFFSET@l
	subf	r4,r5,r4

	rlwinm	r6,r25,0,28,31	/* ERPN */
	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
	add	r7,r7,r4

	stw	r6,0(r3)
	stw	r7,4(r3)
#endif
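
/*
 * Illustrative example for the DYNAMIC_MEMSTART case (assumed values):
 * if PAGE_OFFSET = 0xc0000000 and the kernel is linked at KERNELBASE =
 * 0xc0001000, the delta r4 = 0x1000 is added to the 256M-aligned
 * physical base, so kernstart_addr points at the kernel image itself
 * rather than at the start of the memory page.
 */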

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	/* Clear the Machine Check Syndrome Register */
	li	r0,0
	mtspr	SPRN_MCSR,r0

	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */

/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */

interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
	CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
			   machine_check_exception)
	MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)

	/* Data Storage Interrupt */
	DATA_STORAGE_EXCEPTION

	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, \
		  do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
	EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
		  FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG(BOOKE_INTERRUPT_SYSCALL)
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
		  AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Interval Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, \
		  unknown_exception, EXC_XFER_EE)

	/* Watchdog Timer Interrupt */
	/* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, unknown_exception)
#endif

	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError44x)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */

4:
	mtspr	SPRN_MMUCR,r12

	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * rather than adding an instruction here. We should measure
	 * whether the whole thing is worth it in the first place
	 * as we could avoid loading SPRN_ESR completely in the first
	 * place...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
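	/*
	 * Illustrative decoding of the rlwimi below (not from the
	 * original source): ESR[ST], the "store operation" bit at IBM
	 * bit 8 (0x00800000), rotated left by 10 lands on IBM bit 30,
	 * which is the _PAGE_RW position (0x2) in the PTE. So after
	 *
	 *	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	 *	rlwimi	r13,r12,10,30,30
	 *
	 * r13 additionally requires _PAGE_RW only when the faulting
	 * access was a store.
	 */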
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	rlwimi	r13,r12,10,30,30

	/* Load the PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	/* Compare with watermark (instruction gets patched) */
	.globl tlb_44x_patch_hwater_D
tlb_44x_patch_hwater_D:
	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_DEAR

	/* Jump to common tlb load */
	b	finish_tlb_load_44x

2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	DataStorage

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError44x)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */

4:
	mtspr	SPRN_MMUCR,r12

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	/* Compare with watermark (instruction gets patched) */
	.globl tlb_44x_patch_hwater_I
tlb_44x_patch_hwater_I:
	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_SRR0

	/* Jump to common TLB load point */
	b	finish_tlb_load_44x

2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - EA of fault
 *	r11 - PTE high word value
 *	r12 - PTE low word value
 *	r13 - TLB index
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
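/*
 * Layout note (illustrative, summarizing the stores below): a 44x TLB
 * entry is written as three words selected by the third tlbwe operand:
 *
 *	PPC44x_TLB_PAGEID - EPN, valid bit and page size
 *	PPC44x_TLB_XLAT   - RPN and ERPN (the physical translation)
 *	PPC44x_TLB_ATTRIB - storage attributes plus the SX/SW/SR and
 *			    UX/UW/UR permission bits
 */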
finish_tlb_load_44x:
	/* Combine RPN & ERPN and write WS 0 */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,PPC44x_TLB_XLAT

	/*
	 * Create WS1. This is the faulting address (EPN),
	 * page size, and valid flag.
	 */
	li	r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
	/* Insert valid and page size */
	rlwimi	r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
	tlbwe	r10,r13,PPC44x_TLB_PAGEID	/* Write PAGEID */

	/* And WS 2 */
	li	r10,0xf85			/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
	and	r11,r12,r10			/* Mask PTE bits to keep */
	andi.	r10,r12,_PAGE_USER		/* User page ? */
	beq	1f				/* nope, leave U bits empty */
	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
1:	tlbwe	r11,r13,PPC44x_TLB_ATTRIB	/* Write ATTRIB */
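
	/*
	 * Illustrative decoding of the WS 2 construction above (not
	 * from the original source): the 0xf85 mask keeps the storage
	 * attribute bits (W, I, M, G, E) plus SX and SR from the PTE,
	 * the first rlwimi folds _PAGE_DIRTY into the SW position so
	 * that clean pages take a fault on first write, and for user
	 * pages the supervisor SX/SW/SR bits are replicated three
	 * positions left into UX/UW/UR.
	 */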

	/* Done...restore registers and get out of here.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi					/* Force context change */

/* TLB error interrupts for 476
 */
#ifdef CONFIG_PPC_47x
	START_EXCEPTION(DataTLBError47x)
	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1,r11
	mtspr	SPRN_SPRG_WSCRATCH2,r12
	mtspr	SPRN_SPRG_WSCRATCH3,r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4,r11
	mfspr	r10,SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11,PAGE_OFFSET@h
	cmplw	cr0,r10,r11
	blt+	3f
	lis	r11,swapper_pg_dir@h
	ori	r11,r11, swapper_pg_dir@l
	li	r12,0			/* MMUCR = 0 */
	b	4f

	/* Get the PGD for the current thread and setup MMUCR */
3:	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
	mfspr	r12,SPRN_PID		/* Get PID */
4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */

	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * rather than adding an instruction here. We should measure
	 * whether the whole thing is worth it in the first place
	 * as we could avoid loading SPRN_ESR completely in the first
	 * place...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	rlwimi	r13,r12,10,30,30

	/* Load the PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
	lwzx	r11,r12,r11		/* Get pgd/pmd entry */

	/* Word 0 is EPN,V,TS,DSIZ */
	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size */
	li	r12,0
	tlbwe	r10,r12,0

	/* XXX can we do better ? Need to make sure tlbwe has established
	 * latch V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
	isync
#endif

	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
	/* Compute pte address */
	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
	beq	2f			/* Bail if no table */
	lwz	r11,0(r12)		/* Get high word of pte entry */

	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
	 * bottom of r12 to create a data dependency... We can also use r10
	 * as destination nowadays
	 */
#ifdef CONFIG_SMP
	lwsync
#endif
	lwz	r12,4(r12)		/* Get low word of pte entry */

	andc.	r13,r13,r12		/* Check permission */

	/* Jump to common tlb load */
	beq	finish_tlb_load_47x

2:	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11,SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13,SPRN_SPRG_RSCRATCH3
	mfspr	r12,SPRN_SPRG_RSCRATCH2
	mfspr	r11,SPRN_SPRG_RSCRATCH1
	mfspr	r10,SPRN_SPRG_RSCRATCH0
	b	DataStorage

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError47x)
	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1,r11
	mtspr	SPRN_SPRG_WSCRATCH2,r12
	mtspr	SPRN_SPRG_WSCRATCH3,r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4,r11
	mfspr	r10,SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11,PAGE_OFFSET@h
	cmplw	cr0,r10,r11
	blt+	3f
	lis	r11,swapper_pg_dir@h
	ori	r11,r11, swapper_pg_dir@l
	li	r12,0			/* MMUCR = 0 */
	b	4f

	/* Get the PGD for the current thread and setup MMUCR */
3:	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)
	mfspr	r12,SPRN_PID		/* Get PID */
4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Load PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
	lwzx	r11,r12,r11		/* Get pgd/pmd entry */

	/* Word 0 is EPN,V,TS,DSIZ */
	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size */
	li	r12,0
	tlbwe	r10,r12,0

	/* XXX can we do better ? Need to make sure tlbwe has established
	 * latch V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
	isync
#endif

	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
	/* Compute pte address */
	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
	beq	2f			/* Bail if no table */

	lwz	r11,0(r12)		/* Get high word of pte entry */
	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
	 * bottom of r12 to create a data dependency... We can also use r10
	 * as destination nowadays
	 */
#ifdef CONFIG_SMP
	lwsync
#endif
	lwz	r12,4(r12)		/* Get low word of pte entry */

	andc.	r13,r13,r12		/* Check permission */

	/* Jump to common TLB load point */
	beq	finish_tlb_load_47x

2:	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - free to use
 *	r11 - PTE high word value
 *	r12 - PTE low word value
 *	r13 - free to use
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
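/*
 * Layout note (illustrative): unlike the 44x path, a 47x TLB entry is
 * written as words 0/1/2, and word 0 (EPN, valid, TS, size) was
 * already written in the miss handlers above to latch the V bit, so
 * only the translation (word 1) and attribute (word 2) writes remain
 * here.
 */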
finish_tlb_load_47x:
	/* Combine RPN & ERPN and write WS 1 */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,1

	/* And make up word 2 */
	li	r10,0xf85			/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
	and	r11,r12,r10			/* Mask PTE bits to keep */
	andi.	r10,r12,_PAGE_USER		/* User page ? */
	beq	1f				/* nope, leave U bits empty */
	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
1:	tlbwe	r11,r13,2

	/* Done...restore registers and get out of here.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi

#endif /* CONFIG_PPC_47x */

	/* Debug Interrupt */
	/*
	 * This statement needs to exist at the end of the IVPR
	 * definition just in case you end up taking a debug
	 * exception within another exception.
	 */
	DEBUG_CRIT_EXCEPTION

interrupt_end:

/*
 * Global functions
 */

/*
 * Adjust the machine check IVOR on 440A cores
 */
_GLOBAL(__fixup_440A_mcheck)
	li	r3,MachineCheckA@l
	mtspr	SPRN_IVOR1,r3
	sync
	blr

_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync			/* Force context change */
	blr

/*
 * Init CPU state. This is called at boot time or for secondary CPUs
 * to setup initial TLB entries, setup IVORs, etc...
 *
 */
_GLOBAL(init_cpu_state)
	mflr	r22
#ifdef CONFIG_PPC_47x
	/* We use the PVR to differentiate 44x cores from 476 */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,PVR_476FPE@h
	beq	head_start_47x
	cmplwi	cr0,r3,PVR_476@h
	beq	head_start_47x
	cmplwi	cr0,r3,PVR_476_ISS@h
	beq	head_start_47x
#endif /* CONFIG_PPC_47x */

/*
 * In case the firmware didn't do it, we apply some workarounds
 * that are good for all 440 core variants here
 */
	mfspr	r3,SPRN_CCR0
	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
	isync
	mtspr	SPRN_CCR0,r3
	isync
	sync

/*
 * Set up the initial MMU state for 44x
 *
 * We are still executing code at the virtual address
 * mappings set by the firmware for the base of RAM.
 *
 * We first invalidate all TLB entries but the one
 * we are running from. We then load the KERNELBASE
 * mappings so we can begin to use kernel addresses
 * natively and so the interrupt vector locations are
 * permanently pinned (necessary since Book E
 * implementations always have translation enabled).
 *
 * TODO: Use the known TLB entry we are running from to
 *	 determine which physical region we are located
 *	 in. This can be used to determine where in RAM
 *	 (on a shared CPU system) or PCI memory space
 *	 (on a DRAMless system) we are located.
 *	 For now, we assume a perfect world which means
 *	 we are located at the base of DRAM (physical 0).
 */

/*
 * Search TLB for entry that we are currently using.
 * Invalidate all entries but the one we are using.
 */
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID		/* Get PID */
	mfmsr	r4			/* Get MSR */
	andi.	r4,r4,MSR_IS@l		/* TS=1? */
	beq	wmmucr			/* If not, leave STS=0 */
	oris	r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */
wmmucr:	mtspr	SPRN_MMUCR,r3		/* Put MMUCR */
	sync

	bl	invstr			/* Find our address */
invstr:	mflr	r5			/* Make it accessible */
	tlbsx	r23,0,r5		/* Find entry we are in */
	li	r4,0			/* Start at TLB entry 0 */
	li	r3,0			/* Set PAGEID inval value */
1:	cmpw	r23,r4			/* Is this our entry? */
	beq	skpinv			/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID	/* If not, inval the entry */
skpinv:	addi	r4,r4,1			/* Increment */
	cmpwi	r4,64			/* Are we done? */
	bne	1b			/* If not, repeat */
	isync				/* If so, context change */

/*
 * Configure and load pinned entry into TLB slot 63.
 */
#ifdef CONFIG_NONSTATIC_KERNEL
	/*
	 * In case of a NONSTATIC_KERNEL we reuse the TLB XLAT
	 * entries of the initial mapping set by the boot loader.
	 * The XLAT entry is stored in r25
	 */

	/* Read the XLAT entry for our current mapping */
	tlbre	r25,r23,PPC44x_TLB_XLAT

	lis	r3,KERNELBASE@h
	ori	r3,r3,KERNELBASE@l

	/* Use our current RPN entry */
	mr	r4,r25
#else

	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Kernel is at the base of RAM */
	li	r4, 0			/* Load the kernel physical address */
#endif

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

	/* pageid fields */
	clrrwi	r3,r3,10		/* Mask off the effective page number */
	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M

	/* xlat fields */
	clrrwi	r4,r4,10		/* Mask off the real page number */
					/* ERPN is 0 for first 4GB page */

	/* attrib fields */
	/* Added guarded bit to protect against speculative loads/stores */
	li	r5,0
	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	li	r0,63			/* TLB slot 63 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
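
	/*
	 * Worked example (illustrative, static kernel case): with
	 * PAGE_OFFSET = 0xc0000000 and the kernel at physical 0, slot
	 * 63 ends up as
	 *
	 *	PAGEID: EPN = 0xc0000000, 256M page, valid
	 *	XLAT:   RPN = 0, ERPN = 0
	 *	ATTRIB: SW|SR|SX|G (supervisor RWX, guarded)
	 *
	 * i.e. one pinned entry mapping the first 256M of RAM at
	 * PAGE_OFFSET.
	 */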

	/* Force context change */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* If necessary, invalidate original entry we used */
3:	cmpwi	r23,63
	beq	4f
	li	r6,0
	tlbwe	r6,r23,PPC44x_TLB_PAGEID
	isync

4:
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug. */

	/* pageid fields */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K

	/* xlat fields */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* attrib fields */
	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
	li	r0,62			/* TLB slot 62 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID
	tlbwe	r4,r0,PPC44x_TLB_XLAT
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0, CriticalInput);
	SET_IVOR(1, MachineCheck);
	SET_IVOR(2, DataStorage);
	SET_IVOR(3, InstructionStorage);
	SET_IVOR(4, ExternalInput);
	SET_IVOR(5, Alignment);
	SET_IVOR(6, Program);
	SET_IVOR(7, FloatingPointUnavailable);
	SET_IVOR(8, SystemCall);
	SET_IVOR(9, AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError44x);
	SET_IVOR(14, InstructionTLBError44x);
	SET_IVOR(15, DebugCrit);

	b	head_start_common


#ifdef CONFIG_PPC_47x

#ifdef CONFIG_SMP

/* Entry point for secondary 47x processors */
_GLOBAL(start_secondary_47x)
	mr	r24,r3		/* CPU number */

	bl	init_cpu_state

	/* Now we need to bolt the rest of kernel memory which
	 * is done in C code. We must be careful because our task
	 * struct or our stack can (and will probably) be out
	 * of reach of the initial 256M TLB entry, so we use a
	 * small temporary stack in .bss for that. This works
	 * because only one CPU at a time can be in this code
	 */
	lis	r1,temp_boot_stack@h
	ori	r1,r1,temp_boot_stack@l
	addi	r1,r1,1024-STACK_FRAME_OVERHEAD
	li	r0,0
	stw	r0,0(r1)
	bl	mmu_init_secondary

	/* Now we can get our task struct and real stack pointer */

	/* Get current_thread_info and current */
	lis	r1,secondary_ti@ha
	lwz	r1,secondary_ti@l(r1)
	lwz	r2,TI_TASK(r1)

	/* Current stack pointer */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	stw	r0,0(r1)

	/* Kernel stack for exception entry in SPRG3 */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	b	start_secondary

#endif /* CONFIG_SMP */

/*
 * Set up the initial MMU state for 47x
 *
 * We are still executing code at the virtual address
 * mappings set by the firmware for the base of RAM.
 */

head_start_47x:
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID		/* Get PID */
	mfmsr	r4			/* Get MSR */
	andi.	r4,r4,MSR_IS@l		/* TS=1? */
	beq	1f			/* If not, leave STS=0 */
	oris	r3,r3,PPC47x_MMUCR_STS@h /* Set STS=1 */
1:	mtspr	SPRN_MMUCR,r3		/* Put MMUCR */
	sync

	/* Find the entry we are running from */
	bl	1f
1:	mflr	r23
	tlbsx	r23,0,r23
	tlbre	r24,r23,0
	tlbre	r25,r23,1
	tlbre	r26,r23,2

/*
 * Cleanup time
 */

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

clear_all_utlb_entries:

	#; Set initial values.

	addis	r3,0,0x8000
	addi	r4,0,0
	addi	r5,0,0
	b	clear_utlb_entry

	#; Align the loop to speed things up.

	.align	6

clear_utlb_entry:

	tlbwe	r4,r3,0
	tlbwe	r5,r3,1
	tlbwe	r5,r3,2
	addis	r3,r3,0x2000
	cmpwi	r3,0
	bne	clear_utlb_entry
	addis	r3,0,0x8000
	addis	r4,r4,0x100
	cmpwi	r4,0
	bne	clear_utlb_entry

	#; Restore original entry.

	oris	r23,r23,0x8000	/* specify the way */
	tlbwe	r24,r23,0
	tlbwe	r25,r23,1
	tlbwe	r26,r23,2

/*
 * Configure and load pinned entry into TLB for the kernel core
 */

	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Word 0 */
	clrrwi	r3,r3,12		/* Mask off the effective page number */
	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M

	/* Word 1 - use r25. RPN is the same as the original entry */

	/* Word 2 */
	li	r5,0
	ori	r5,r5,PPC47x_TLB2_S_RWX
#ifdef CONFIG_SMP
	ori	r5,r5,PPC47x_TLB2_M
#endif

	/* We write to way 0 and bolted 0 */
	lis	r0,0x8800
	tlbwe	r3,r0,0
	tlbwe	r25,r0,1
	tlbwe	r5,r0,2

/*
 * Configure SSPCR, ISPCR and USPCR for now to search everything, we can fix
 * them up later
 */
	LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
	mtspr	SPRN_SSPCR,r3
	mtspr	SPRN_USPCR,r3
	LOAD_REG_IMMEDIATE(r3, 0x12345670)
	mtspr	SPRN_ISPCR,r3

	/* Force context change */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* Invalidate original entry we used */
3:
	rlwinm	r24,r24,0,21,19	/* clear the "valid" bit */
	tlbwe	r24,r23,0
	addi	r24,0,0
	tlbwe	r24,r23,1
	tlbwe	r24,r23,2
	isync			/* Clear out the shadow TLB entries */

#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug. */

	/* Word 0 */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M

	/* Word 1 */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* Word 2 */
	li	r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)

	/* Bolted in way 0, bolt slot 5, we -hope- we don't hit the same
	 * congruence class as the kernel, we need to make sure of it at
	 * some point
	 */
	lis	r0,0x8d00
	tlbwe	r3,r0,0
	tlbwe	r4,r0,1
	tlbwe	r5,r0,2

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0, CriticalInput);
	SET_IVOR(1, MachineCheckA);
	SET_IVOR(2, DataStorage);
	SET_IVOR(3, InstructionStorage);
	SET_IVOR(4, ExternalInput);
	SET_IVOR(5, Alignment);
	SET_IVOR(6, Program);
	SET_IVOR(7, FloatingPointUnavailable);
	SET_IVOR(8, SystemCall);
	SET_IVOR(9, AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError47x);
	SET_IVOR(14, InstructionTLBError47x);
	SET_IVOR(15, DebugCrit);

	/* We configure icbi to invalidate 128 bytes at a time since the
	 * current 32-bit kernel code isn't too happy with icache != dcache
	 * block size. We also disable the BTAC as this can cause errors
	 * in some circumstances (see IBM Erratum 47).
	 */
	mfspr	r3,SPRN_CCR0
	oris	r3,r3,0x0020
	ori	r3,r3,0x0040
	mtspr	SPRN_CCR0,r3
	isync

#endif /* CONFIG_PPC_47x */

/*
 * Here we are back to code that is common between 44x and 47x
 *
 * We proceed to further kernel initialization and return to the
 * main kernel entry
 */
head_start_common:
	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4

	/*
	 * If the kernel was loaded at a non-zero 256 MB page, we need to
	 * mask off the most significant 4 bits to get the relative address
	 * from the start of physical memory
	 */
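	/*
	 * Illustrative example (not from the original source): if the
	 * saved return address in r22 is 0x30001234 because the kernel
	 * was loaded at a non-zero 256 MB page, masking the top 4 bits
	 * gives 0x00001234, and adding PAGE_OFFSET@h (assuming
	 * PAGE_OFFSET = 0xc0000000) yields the proper virtual return
	 * address 0xc0001234.
	 */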
	rlwinm	r22,r22,0,4,31
	addis	r22,r22,PAGE_OFFSET@h
	mtlr	r22
	isync
	blr

/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	PAGE_SHIFT
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

/*
 * To support >32-bit physical addresses, we use an 8KB pgdir.
 */
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8

#ifdef CONFIG_SMP
	.align	12
temp_boot_stack:
	.space	1024
#endif /* CONFIG_SMP */