Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/arm/boot/compressed/head.S | |
3 | * | |
4 | * Copyright (C) 1996-2002 Russell King | |
10c2df65 | 5 | * Copyright (C) 2004 Hyok S. Choi (MPU support) |
1da177e4 LT |
6 | * |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License version 2 as | |
9 | * published by the Free Software Foundation. | |
10 | */ | |
1da177e4 LT |
11 | #include <linux/linkage.h> |
12 | ||
13 | /* | |
14 | * Debugging stuff | |
15 | * | |
16 | * Note that these macros must not contain any code which is not | |
17 | * 100% relocatable. Any attempt to do so will result in a crash. | |
18 | * Please select one of the following when turning on debugging. | |
19 | */ | |
20 | #ifdef DEBUG | |
5cd0c344 | 21 | |
5cd0c344 | 22 | #if defined(CONFIG_DEBUG_ICEDCC) |
7d95ded9 | 23 | |
dfad549d | 24 | #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7) |
4e6d488a | 25 | .macro loadsp, rb, tmp |
7d95ded9 TL |
26 | .endm |
27 | .macro writeb, ch, rb | |
28 | mcr p14, 0, \ch, c0, c5, 0 | |
29 | .endm | |
c633c3cf | 30 | #elif defined(CONFIG_CPU_XSCALE) |
4e6d488a | 31 | .macro loadsp, rb, tmp |
c633c3cf JCPV |
32 | .endm |
33 | .macro writeb, ch, rb | |
34 | mcr p14, 0, \ch, c8, c0, 0 | |
35 | .endm | |
7d95ded9 | 36 | #else |
4e6d488a | 37 | .macro loadsp, rb, tmp |
1da177e4 | 38 | .endm |
224b5be6 | 39 | .macro writeb, ch, rb |
41a9e680 | 40 | mcr p14, 0, \ch, c1, c0, 0 |
1da177e4 | 41 | .endm |
7d95ded9 TL |
42 | #endif |
43 | ||
5cd0c344 | 44 | #else |
224b5be6 | 45 | |
a09e64fb | 46 | #include <mach/debug-macro.S> |
224b5be6 | 47 | |
5cd0c344 RK |
48 | .macro writeb, ch, rb |
49 | senduart \ch, \rb | |
1da177e4 | 50 | .endm |
5cd0c344 | 51 | |
224b5be6 | 52 | #if defined(CONFIG_ARCH_SA1100) |
4e6d488a | 53 | .macro loadsp, rb, tmp |
1da177e4 | 54 | mov \rb, #0x80000000 @ physical base address |
224b5be6 | 55 | #ifdef CONFIG_DEBUG_LL_SER3 |
1da177e4 | 56 | add \rb, \rb, #0x00050000 @ Ser3 |
224b5be6 | 57 | #else |
1da177e4 | 58 | add \rb, \rb, #0x00010000 @ Ser1 |
224b5be6 | 59 | #endif |
1da177e4 | 60 | .endm |
1da177e4 | 61 | #elif defined(CONFIG_ARCH_S3C2410) |
4e6d488a | 62 | .macro loadsp, rb, tmp |
1da177e4 | 63 | mov \rb, #0x50000000 |
c7657846 | 64 | add \rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT |
1da177e4 | 65 | .endm |
1da177e4 | 66 | #else |
4e6d488a TL |
67 | .macro loadsp, rb, tmp |
68 | addruart \rb, \tmp | |
224b5be6 | 69 | .endm |
1da177e4 | 70 | #endif |
5cd0c344 | 71 | #endif |
1da177e4 LT |
72 | #endif |
73 | ||
74 | .macro kputc,val | |
75 | mov r0, \val | |
76 | bl putc | |
77 | .endm | |
78 | ||
79 | .macro kphex,val,len | |
80 | mov r0, \val | |
81 | mov r1, #\len | |
82 | bl phex | |
83 | .endm | |
84 | ||
85 | .macro debug_reloc_start | |
86 | #ifdef DEBUG | |
87 | kputc #'\n' | |
88 | kphex r6, 8 /* processor id */ | |
89 | kputc #':' | |
90 | kphex r7, 8 /* architecture id */ | |
f12d0d7c | 91 | #ifdef CONFIG_CPU_CP15 |
1da177e4 LT |
92 | kputc #':' |
93 | mrc p15, 0, r0, c1, c0 | |
94 | kphex r0, 8 /* control reg */ | |
f12d0d7c | 95 | #endif |
1da177e4 LT |
96 | kputc #'\n' |
97 | kphex r5, 8 /* decompressed kernel start */ | |
98 | kputc #'-' | |
f4619025 | 99 | kphex r9, 8 /* decompressed kernel end */ |
1da177e4 LT |
100 | kputc #'>' |
101 | kphex r4, 8 /* kernel execution address */ | |
102 | kputc #'\n' | |
103 | #endif | |
104 | .endm | |
105 | ||
106 | .macro debug_reloc_end | |
107 | #ifdef DEBUG | |
108 | kphex r5, 8 /* end of kernel */ | |
109 | kputc #'\n' | |
110 | mov r0, r4 | |
111 | bl memdump /* dump 256 bytes at start of kernel */ | |
112 | #endif | |
113 | .endm | |
114 | ||
115 | .section ".start", #alloc, #execinstr | |
116 | /* | |
117 | * sort out different calling conventions | |
118 | */ | |
119 | .align | |
26e5ca93 | 120 | .arm @ Always enter in ARM state |
1da177e4 LT |
121 | start: |
122 | .type start,#function | |
b11fe388 | 123 | .rept 7 |
1da177e4 LT |
124 | mov r0, r0 |
125 | .endr | |
b11fe388 NP |
126 | ARM( mov r0, r0 ) |
127 | ARM( b 1f ) | |
128 | THUMB( adr r12, BSYM(1f) ) | |
129 | THUMB( bx r12 ) | |
1da177e4 | 130 | |
1da177e4 LT |
131 | .word 0x016f2818 @ Magic numbers to help the loader |
132 | .word start @ absolute load/run zImage address | |
133 | .word _edata @ zImage end address | |
26e5ca93 | 134 | THUMB( .thumb ) |
1da177e4 | 135 | 1: mov r7, r1 @ save architecture ID |
f4619025 | 136 | mov r8, r2 @ save atags pointer |
1da177e4 LT |
137 | |
138 | #ifndef __ARM_ARCH_2__ | |
139 | /* | |
140 | * Booting from Angel - need to enter SVC mode and disable | |
141 | * FIQs/IRQs (numeric definitions from angel arm.h source). | |
142 | * We only do this if we were in user mode on entry. | |
143 | */ | |
144 | mrs r2, cpsr @ get current mode | |
145 | tst r2, #3 @ not user? | |
146 | bne not_angel | |
147 | mov r0, #0x17 @ angel_SWIreason_EnterSVC | |
0e056f20 CM |
148 | ARM( swi 0x123456 ) @ angel_SWI_ARM |
149 | THUMB( svc 0xab ) @ angel_SWI_THUMB | |
1da177e4 LT |
150 | not_angel: |
151 | mrs r2, cpsr @ turn off interrupts to | |
152 | orr r2, r2, #0xc0 @ prevent angel from running | |
153 | msr cpsr_c, r2 | |
154 | #else | |
155 | teqp pc, #0x0c000003 @ turn off interrupts | |
156 | #endif | |
157 | ||
158 | /* | |
159 | * Note that some cache flushing and other stuff may | |
160 | * be needed here - is there an Angel SWI call for this? | |
161 | */ | |
162 | ||
163 | /* | |
164 | * some architecture specific code can be inserted | |
f4619025 | 165 | * by the linker here, but it should preserve r7, r8, and r9. |
1da177e4 LT |
166 | */ |
167 | ||
168 | .text | |
6d7d0ae5 | 169 | |
e69edc79 EM |
170 | #ifdef CONFIG_AUTO_ZRELADDR |
171 | @ determine final kernel image address | |
bfa64c4a DM |
172 | mov r4, pc |
173 | and r4, r4, #0xf8000000 | |
e69edc79 EM |
174 | add r4, r4, #TEXT_OFFSET |
175 | #else | |
9e84ed63 | 176 | ldr r4, =zreladdr |
e69edc79 | 177 | #endif |
1da177e4 | 178 | |
6d7d0ae5 NP |
179 | bl cache_on |
180 | ||
181 | restart: adr r0, LC0 | |
34cc1a8f | 182 | ldmia r0, {r1, r2, r3, r6, r10, r11, r12} |
adcc2591 | 183 | ldr sp, [r0, #28] |
6d7d0ae5 NP |
184 | |
185 | /* | |
186 | * We might be running at a different address. We need | |
187 | * to fix up various pointers. | |
188 | */ | |
189 | sub r0, r0, r1 @ calculate the delta offset | |
6d7d0ae5 | 190 | add r6, r6, r0 @ _edata |
34cc1a8f NP |
191 | add r10, r10, r0 @ inflated kernel size location |
192 | ||
193 | /* | |
194 | * The kernel build system appends the size of the | |
195 | * decompressed kernel at the end of the compressed data | |
196 | * in little-endian form. | |
197 | */ | |
198 | ldrb r9, [r10, #0] | |
199 | ldrb lr, [r10, #1] | |
200 | orr r9, r9, lr, lsl #8 | |
201 | ldrb lr, [r10, #2] | |
202 | ldrb r10, [r10, #3] | |
203 | orr r9, r9, lr, lsl #16 | |
204 | orr r9, r9, r10, lsl #24 | |
1da177e4 | 205 | |
6d7d0ae5 NP |
206 | #ifndef CONFIG_ZBOOT_ROM |
207 | /* malloc space is above the relocated stack (64k max) */ | |
208 | add sp, sp, r0 | |
209 | add r10, sp, #0x10000 | |
210 | #else | |
1da177e4 | 211 | /* |
6d7d0ae5 NP |
212 | * With ZBOOT_ROM the bss/stack is non relocatable, |
213 | * but someone could still run this code from RAM, | |
214 | * in which case our reference is _edata. | |
1da177e4 | 215 | */ |
6d7d0ae5 NP |
216 | mov r10, r6 |
217 | #endif | |
218 | ||
219 | /* | |
220 | * Check to see if we will overwrite ourselves. | |
221 | * r4 = final kernel address | |
6d7d0ae5 NP |
222 | * r9 = size of decompressed image |
223 | * r10 = end of this image, including bss/stack/malloc space if non XIP | |
224 | * We basically want: | |
ea9df3b1 | 225 | * r4 - 16k page directory >= r10 -> OK |
adcc2591 | 226 | * r4 + image length <= current position (pc) -> OK |
6d7d0ae5 | 227 | */ |
ea9df3b1 | 228 | add r10, r10, #16384 |
6d7d0ae5 NP |
229 | cmp r4, r10 |
230 | bhs wont_overwrite | |
231 | add r10, r4, r9 | |
adcc2591 NP |
232 | ARM( cmp r10, pc ) |
233 | THUMB( mov lr, pc ) | |
234 | THUMB( cmp r10, lr ) | |
6d7d0ae5 NP |
235 | bls wont_overwrite |
236 | ||
237 | /* | |
238 | * Relocate ourselves past the end of the decompressed kernel. | |
6d7d0ae5 NP |
239 | * r6 = _edata |
240 | * r10 = end of the decompressed kernel | |
241 | * Because we always copy ahead, we need to do it from the end and go | |
242 | * backward in case the source and destination overlap. | |
243 | */ | |
adcc2591 NP |
244 | /* |
245 | * Bump to the next 256-byte boundary with the size of | |
246 | * the relocation code added. This avoids overwriting | |
247 | * ourself when the offset is small. | |
248 | */ | |
249 | add r10, r10, #((reloc_code_end - restart + 256) & ~255) | |
6d7d0ae5 NP |
250 | bic r10, r10, #255 |
251 | ||
adcc2591 NP |
252 | /* Get start of code we want to copy and align it down. */ |
253 | adr r5, restart | |
254 | bic r5, r5, #31 | |
255 | ||
6d7d0ae5 NP |
256 | sub r9, r6, r5 @ size to copy |
257 | add r9, r9, #31 @ rounded up to a multiple | |
258 | bic r9, r9, #31 @ ... of 32 bytes | |
259 | add r6, r9, r5 | |
260 | add r9, r9, r10 | |
261 | ||
262 | 1: ldmdb r6!, {r0 - r3, r10 - r12, lr} | |
263 | cmp r6, r5 | |
264 | stmdb r9!, {r0 - r3, r10 - r12, lr} | |
265 | bhi 1b | |
266 | ||
267 | /* Preserve offset to relocated code. */ | |
268 | sub r6, r9, r6 | |
269 | ||
7c2527f0 TL |
270 | #ifndef CONFIG_ZBOOT_ROM |
271 | /* cache_clean_flush may use the stack, so relocate it */ | |
272 | add sp, sp, r6 | |
273 | #endif | |
274 | ||
6d7d0ae5 NP |
275 | bl cache_clean_flush |
276 | ||
277 | adr r0, BSYM(restart) | |
278 | add r0, r0, r6 | |
279 | mov pc, r0 | |
280 | ||
281 | wont_overwrite: | |
282 | /* | |
283 | * If delta is zero, we are running at the address we were linked at. | |
284 | * r0 = delta | |
285 | * r2 = BSS start | |
286 | * r3 = BSS end | |
287 | * r4 = kernel execution address | |
288 | * r7 = architecture ID | |
289 | * r8 = atags pointer | |
290 | * r11 = GOT start | |
291 | * r12 = GOT end | |
292 | * sp = stack pointer | |
293 | */ | |
294 | teq r0, #0 | |
295 | beq not_relocated | |
98e12b5a | 296 | add r11, r11, r0 |
6d7d0ae5 | 297 | add r12, r12, r0 |
1da177e4 LT |
298 | |
299 | #ifndef CONFIG_ZBOOT_ROM | |
300 | /* | |
301 | * If we're running fully PIC === CONFIG_ZBOOT_ROM = n, | |
302 | * we need to fix up pointers into the BSS region. | |
6d7d0ae5 | 303 | * Note that the stack pointer has already been fixed up. |
1da177e4 LT |
304 | */ |
305 | add r2, r2, r0 | |
306 | add r3, r3, r0 | |
1da177e4 LT |
307 | |
308 | /* | |
309 | * Relocate all entries in the GOT table. | |
310 | */ | |
98e12b5a | 311 | 1: ldr r1, [r11, #0] @ relocate entries in the GOT |
1da177e4 | 312 | add r1, r1, r0 @ table. This fixes up the |
98e12b5a | 313 | str r1, [r11], #4 @ C references. |
6d7d0ae5 | 314 | cmp r11, r12 |
1da177e4 LT |
315 | blo 1b |
316 | #else | |
317 | ||
318 | /* | |
319 | * Relocate entries in the GOT table. We only relocate | |
320 | * the entries that are outside the (relocated) BSS region. | |
321 | */ | |
98e12b5a | 322 | 1: ldr r1, [r11, #0] @ relocate entries in the GOT |
1da177e4 LT |
323 | cmp r1, r2 @ entry < bss_start || |
324 | cmphs r3, r1 @ _end < entry | |
325 | addlo r1, r1, r0 @ table. This fixes up the | |
98e12b5a | 326 | str r1, [r11], #4 @ C references. |
6d7d0ae5 | 327 | cmp r11, r12 |
1da177e4 LT |
328 | blo 1b |
329 | #endif | |
330 | ||
331 | not_relocated: mov r0, #0 | |
332 | 1: str r0, [r2], #4 @ clear bss | |
333 | str r0, [r2], #4 | |
334 | str r0, [r2], #4 | |
335 | str r0, [r2], #4 | |
336 | cmp r2, r3 | |
337 | blo 1b | |
338 | ||
1da177e4 | 339 | /* |
6d7d0ae5 NP |
340 | * The C runtime environment should now be setup sufficiently. |
341 | * Set up some pointers, and start decompressing. | |
342 | * r4 = kernel execution address | |
343 | * r7 = architecture ID | |
344 | * r8 = atags pointer | |
1da177e4 | 345 | */ |
6d7d0ae5 NP |
346 | mov r0, r4 |
347 | mov r1, sp @ malloc space above stack | |
348 | add r2, sp, #0x10000 @ 64k max | |
1da177e4 LT |
349 | mov r3, r7 |
350 | bl decompress_kernel | |
1da177e4 | 351 | bl cache_clean_flush |
6d7d0ae5 NP |
352 | bl cache_off |
353 | mov r0, #0 @ must be zero | |
354 | mov r1, r7 @ restore architecture number | |
355 | mov r2, r8 @ restore atags pointer | |
356 | mov pc, r4 @ call kernel | |
1da177e4 | 357 | |
88987ef9 | 358 | .align 2 |
1da177e4 LT |
359 | .type LC0, #object |
360 | LC0: .word LC0 @ r1 | |
361 | .word __bss_start @ r2 | |
362 | .word _end @ r3 | |
6d7d0ae5 | 363 | .word _edata @ r6 |
34cc1a8f | 364 | .word input_data_end - 4 @ r10 (inflated size location) |
98e12b5a | 365 | .word _got_start @ r11 |
1da177e4 | 366 | .word _got_end @ ip |
8d7e4cc2 | 367 | .word .L_user_stack_end @ sp |
1da177e4 LT |
368 | .size LC0, . - LC0 |
369 | ||
370 | #ifdef CONFIG_ARCH_RPC | |
371 | .globl params | |
db7b2b4b | 372 | params: ldr r0, =0x10000100 @ params_phys for RPC |
1da177e4 LT |
373 | mov pc, lr |
374 | .ltorg | |
375 | .align | |
376 | #endif | |
377 | ||
378 | /* | |
379 | * Turn on the cache. We need to setup some page tables so that we | |
380 | * can have both the I and D caches on. | |
381 | * | |
382 | * We place the page tables 16k down from the kernel execution address, | |
383 | * and we hope that nothing else is using it. If we're using it, we | |
384 | * will go pop! | |
385 | * | |
386 | * On entry, | |
387 | * r4 = kernel execution address | |
1da177e4 | 388 | * r7 = architecture number |
f4619025 | 389 | * r8 = atags pointer |
1da177e4 | 390 | * On exit, |
21b2841d | 391 | * r0, r1, r2, r3, r9, r10, r12 corrupted |
1da177e4 | 392 | * This routine must preserve: |
6d7d0ae5 | 393 | * r4, r7, r8 |
1da177e4 LT |
394 | */ |
395 | .align 5 | |
396 | cache_on: mov r3, #8 @ cache_on function | |
397 | b call_cache_fn | |
398 | ||
10c2df65 HC |
399 | /* |
400 | * Initialize the highest priority protection region, PR7 | |
401 | * to cover all 32bit address and cacheable and bufferable. | |
402 | */ | |
403 | __armv4_mpu_cache_on: | |
404 | mov r0, #0x3f @ 4G, the whole | |
405 | mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting | |
406 | mcr p15, 0, r0, c6, c7, 1 | |
407 | ||
408 | mov r0, #0x80 @ PR7 | |
409 | mcr p15, 0, r0, c2, c0, 0 @ D-cache on | |
410 | mcr p15, 0, r0, c2, c0, 1 @ I-cache on | |
411 | mcr p15, 0, r0, c3, c0, 0 @ write-buffer on | |
412 | ||
413 | mov r0, #0xc000 | |
414 | mcr p15, 0, r0, c5, c0, 1 @ I-access permission | |
415 | mcr p15, 0, r0, c5, c0, 0 @ D-access permission | |
416 | ||
417 | mov r0, #0 | |
418 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | |
419 | mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache | |
420 | mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache | |
421 | mrc p15, 0, r0, c1, c0, 0 @ read control reg | |
422 | @ ...I .... ..D. WC.M | |
423 | orr r0, r0, #0x002d @ .... .... ..1. 11.1 | |
424 | orr r0, r0, #0x1000 @ ...1 .... .... .... | |
425 | ||
426 | mcr p15, 0, r0, c1, c0, 0 @ write control reg | |
427 | ||
428 | mov r0, #0 | |
429 | mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache | |
430 | mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache | |
431 | mov pc, lr | |
432 | ||
433 | __armv3_mpu_cache_on: | |
434 | mov r0, #0x3f @ 4G, the whole | |
435 | mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting | |
436 | ||
437 | mov r0, #0x80 @ PR7 | |
438 | mcr p15, 0, r0, c2, c0, 0 @ cache on | |
439 | mcr p15, 0, r0, c3, c0, 0 @ write-buffer on | |
440 | ||
441 | mov r0, #0xc000 | |
442 | mcr p15, 0, r0, c5, c0, 0 @ access permission | |
443 | ||
444 | mov r0, #0 | |
445 | mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 | |
4a8d57a5 UKK |
446 | /* |
447 | * ?? ARMv3 MMU does not allow reading the control register, | |
448 | * does this really work on ARMv3 MPU? | |
449 | */ | |
10c2df65 HC |
450 | mrc p15, 0, r0, c1, c0, 0 @ read control reg |
451 | @ .... .... .... WC.M | |
452 | orr r0, r0, #0x000d @ .... .... .... 11.1 | |
4a8d57a5 | 453 | /* ?? this overwrites the value constructed above? */ |
10c2df65 HC |
454 | mov r0, #0 |
455 | mcr p15, 0, r0, c1, c0, 0 @ write control reg | |
456 | ||
4a8d57a5 | 457 | /* ?? invalidate for the second time? */ |
10c2df65 HC |
458 | mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 |
459 | mov pc, lr | |
460 | ||
1da177e4 LT |
461 | __setup_mmu: sub r3, r4, #16384 @ Page directory size |
462 | bic r3, r3, #0xff @ Align the pointer | |
463 | bic r3, r3, #0x3f00 | |
464 | /* | |
465 | * Initialise the page tables, turning on the cacheable and bufferable | |
466 | * bits for the RAM area only. | |
467 | */ | |
468 | mov r0, r3 | |
f4619025 RK |
469 | mov r9, r0, lsr #18 |
470 | mov r9, r9, lsl #18 @ start of RAM | |
471 | add r10, r9, #0x10000000 @ a reasonable RAM size | |
1da177e4 LT |
472 | mov r1, #0x12 |
473 | orr r1, r1, #3 << 10 | |
474 | add r2, r3, #16384 | |
265d5e48 | 475 | 1: cmp r1, r9 @ if virt > start of RAM |
af3e4fd3 MG |
476 | #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH |
477 | orrhs r1, r1, #0x08 @ set cacheable | |
478 | #else | |
1da177e4 | 479 | orrhs r1, r1, #0x0c @ set cacheable, bufferable |
af3e4fd3 | 480 | #endif |
f4619025 | 481 | cmp r1, r10 @ if virt > end of RAM |
1da177e4 LT |
482 | bichs r1, r1, #0x0c @ clear cacheable, bufferable |
483 | str r1, [r0], #4 @ 1:1 mapping | |
484 | add r1, r1, #1048576 | |
485 | teq r0, r2 | |
486 | bne 1b | |
487 | /* | |
488 | * If ever we are running from Flash, then we surely want the cache | |
489 | * to be enabled also for our execution instance... We map 2MB of it | |
490 | * so there is no map overlap problem for up to 1 MB compressed kernel. | |
491 | * If the execution is in RAM then we would only be duplicating the above. | |
492 | */ | |
493 | mov r1, #0x1e | |
494 | orr r1, r1, #3 << 10 | |
bfa64c4a DM |
495 | mov r2, pc |
496 | mov r2, r2, lsr #20 | |
1da177e4 LT |
497 | orr r1, r1, r2, lsl #20 |
498 | add r0, r3, r2, lsl #2 | |
499 | str r1, [r0], #4 | |
500 | add r1, r1, #1048576 | |
501 | str r1, [r0] | |
502 | mov pc, lr | |
93ed3970 | 503 | ENDPROC(__setup_mmu) |
1da177e4 | 504 | |
af3e4fd3 MG |
505 | __arm926ejs_mmu_cache_on: |
506 | #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH | |
507 | mov r0, #4 @ put dcache in WT mode | |
508 | mcr p15, 7, r0, c15, c0, 0 | |
509 | #endif | |
510 | ||
c76b6b41 | 511 | __armv4_mmu_cache_on: |
1da177e4 | 512 | mov r12, lr |
8bdca0ac | 513 | #ifdef CONFIG_MMU |
1da177e4 LT |
514 | bl __setup_mmu |
515 | mov r0, #0 | |
516 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | |
517 | mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs | |
518 | mrc p15, 0, r0, c1, c0, 0 @ read control reg | |
519 | orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement | |
520 | orr r0, r0, #0x0030 | |
26584853 CM |
521 | #ifdef CONFIG_CPU_ENDIAN_BE8 |
522 | orr r0, r0, #1 << 25 @ big-endian page tables | |
523 | #endif | |
c76b6b41 | 524 | bl __common_mmu_cache_on |
1da177e4 LT |
525 | mov r0, #0 |
526 | mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs | |
8bdca0ac | 527 | #endif |
1da177e4 LT |
528 | mov pc, r12 |
529 | ||
7d09e854 CM |
530 | __armv7_mmu_cache_on: |
531 | mov r12, lr | |
8bdca0ac | 532 | #ifdef CONFIG_MMU |
7d09e854 CM |
533 | mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0 |
534 | tst r11, #0xf @ VMSA | |
535 | blne __setup_mmu | |
536 | mov r0, #0 | |
537 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | |
538 | tst r11, #0xf @ VMSA | |
539 | mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs | |
8bdca0ac | 540 | #endif |
7d09e854 CM |
541 | mrc p15, 0, r0, c1, c0, 0 @ read control reg |
542 | orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement | |
543 | orr r0, r0, #0x003c @ write buffer | |
8bdca0ac | 544 | #ifdef CONFIG_MMU |
26584853 CM |
545 | #ifdef CONFIG_CPU_ENDIAN_BE8 |
546 | orr r0, r0, #1 << 25 @ big-endian page tables | |
547 | #endif | |
7d09e854 CM |
548 | orrne r0, r0, #1 @ MMU enabled |
549 | movne r1, #-1 | |
550 | mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer | |
551 | mcrne p15, 0, r1, c3, c0, 0 @ load domain access control | |
8bdca0ac | 552 | #endif |
7d09e854 CM |
553 | mcr p15, 0, r0, c1, c0, 0 @ load control register |
554 | mrc p15, 0, r0, c1, c0, 0 @ and read it back | |
555 | mov r0, #0 | |
556 | mcr p15, 0, r0, c7, c5, 4 @ ISB | |
557 | mov pc, r12 | |
558 | ||
28853ac8 PZ |
559 | __fa526_cache_on: |
560 | mov r12, lr | |
561 | bl __setup_mmu | |
562 | mov r0, #0 | |
563 | mcr p15, 0, r0, c7, c7, 0 @ Invalidate whole cache | |
564 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | |
565 | mcr p15, 0, r0, c8, c7, 0 @ flush UTLB | |
566 | mrc p15, 0, r0, c1, c0, 0 @ read control reg | |
567 | orr r0, r0, #0x1000 @ I-cache enable | |
568 | bl __common_mmu_cache_on | |
569 | mov r0, #0 | |
570 | mcr p15, 0, r0, c8, c7, 0 @ flush UTLB | |
571 | mov pc, r12 | |
572 | ||
c76b6b41 | 573 | __arm6_mmu_cache_on: |
1da177e4 LT |
574 | mov r12, lr |
575 | bl __setup_mmu | |
576 | mov r0, #0 | |
577 | mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 | |
578 | mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3 | |
579 | mov r0, #0x30 | |
c76b6b41 | 580 | bl __common_mmu_cache_on |
1da177e4 LT |
581 | mov r0, #0 |
582 | mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3 | |
583 | mov pc, r12 | |
584 | ||
c76b6b41 | 585 | __common_mmu_cache_on: |
0e056f20 | 586 | #ifndef CONFIG_THUMB2_KERNEL |
1da177e4 LT |
587 | #ifndef DEBUG |
588 | orr r0, r0, #0x000d @ Write buffer, mmu | |
589 | #endif | |
590 | mov r1, #-1 | |
591 | mcr p15, 0, r3, c2, c0, 0 @ load page table pointer | |
592 | mcr p15, 0, r1, c3, c0, 0 @ load domain access control | |
2dc7667b NP |
593 | b 1f |
594 | .align 5 @ cache line aligned | |
595 | 1: mcr p15, 0, r0, c1, c0, 0 @ load control register | |
596 | mrc p15, 0, r0, c1, c0, 0 @ and read it back to | |
597 | sub pc, lr, r0, lsr #32 @ properly flush pipeline | |
0e056f20 | 598 | #endif |
1da177e4 | 599 | |
1da177e4 LT |
600 | /* |
601 | * Here follow the relocatable cache support functions for the | |
602 | * various processors. This is a generic hook for locating an | |
603 | * entry and jumping to an instruction at the specified offset | |
604 | * from the start of the block. Please note this is all position | |
605 | * independent code. | |
606 | * | |
607 | * r1 = corrupted | |
608 | * r2 = corrupted | |
609 | * r3 = block offset | |
98e12b5a | 610 | * r9 = corrupted |
1da177e4 LT |
611 | * r12 = corrupted |
612 | */ | |
613 | ||
614 | call_cache_fn: adr r12, proc_types | |
f12d0d7c | 615 | #ifdef CONFIG_CPU_CP15 |
98e12b5a | 616 | mrc p15, 0, r9, c0, c0 @ get processor ID |
f12d0d7c | 617 | #else |
98e12b5a | 618 | ldr r9, =CONFIG_PROCESSOR_ID |
f12d0d7c | 619 | #endif |
1da177e4 LT |
620 | 1: ldr r1, [r12, #0] @ get value |
621 | ldr r2, [r12, #4] @ get mask | |
98e12b5a | 622 | eor r1, r1, r9 @ (real ^ match) |
1da177e4 | 623 | tst r1, r2 @ & mask |
0e056f20 CM |
624 | ARM( addeq pc, r12, r3 ) @ call cache function |
625 | THUMB( addeq r12, r3 ) | |
626 | THUMB( moveq pc, r12 ) @ call cache function | |
1da177e4 LT |
627 | add r12, r12, #4*5 |
628 | b 1b | |
629 | ||
630 | /* | |
631 | * Table for cache operations. This is basically: | |
632 | * - CPU ID match | |
633 | * - CPU ID mask | |
634 | * - 'cache on' method instruction | |
635 | * - 'cache off' method instruction | |
636 | * - 'cache flush' method instruction | |
637 | * | |
638 | * We match an entry using: ((real_id ^ match) & mask) == 0 | |
639 | * | |
640 | * Writethrough caches generally only need 'on' and 'off' | |
641 | * methods. Writeback caches _must_ have the flush method | |
642 | * defined. | |
643 | */ | |
88987ef9 | 644 | .align 2 |
1da177e4 LT |
645 | .type proc_types,#object |
646 | proc_types: | |
647 | .word 0x41560600 @ ARM6/610 | |
648 | .word 0xffffffe0 | |
0e056f20 CM |
649 | W(b) __arm6_mmu_cache_off @ works, but slow |
650 | W(b) __arm6_mmu_cache_off | |
1da177e4 | 651 | mov pc, lr |
0e056f20 | 652 | THUMB( nop ) |
c76b6b41 HC |
653 | @ b __arm6_mmu_cache_on @ untested |
654 | @ b __arm6_mmu_cache_off | |
655 | @ b __armv3_mmu_cache_flush | |
1da177e4 LT |
656 | |
657 | .word 0x00000000 @ old ARM ID | |
658 | .word 0x0000f000 | |
659 | mov pc, lr | |
0e056f20 | 660 | THUMB( nop ) |
1da177e4 | 661 | mov pc, lr |
0e056f20 | 662 | THUMB( nop ) |
1da177e4 | 663 | mov pc, lr |
0e056f20 | 664 | THUMB( nop ) |
1da177e4 LT |
665 | |
666 | .word 0x41007000 @ ARM7/710 | |
667 | .word 0xfff8fe00 | |
0e056f20 CM |
668 | W(b) __arm7_mmu_cache_off |
669 | W(b) __arm7_mmu_cache_off | |
1da177e4 | 670 | mov pc, lr |
0e056f20 | 671 | THUMB( nop ) |
1da177e4 LT |
672 | |
673 | .word 0x41807200 @ ARM720T (writethrough) | |
674 | .word 0xffffff00 | |
0e056f20 CM |
675 | W(b) __armv4_mmu_cache_on |
676 | W(b) __armv4_mmu_cache_off | |
1da177e4 | 677 | mov pc, lr |
0e056f20 | 678 | THUMB( nop ) |
1da177e4 | 679 | |
10c2df65 HC |
680 | .word 0x41007400 @ ARM74x |
681 | .word 0xff00ff00 | |
0e056f20 CM |
682 | W(b) __armv3_mpu_cache_on |
683 | W(b) __armv3_mpu_cache_off | |
684 | W(b) __armv3_mpu_cache_flush | |
10c2df65 HC |
685 | |
686 | .word 0x41009400 @ ARM94x | |
687 | .word 0xff00ff00 | |
0e056f20 CM |
688 | W(b) __armv4_mpu_cache_on |
689 | W(b) __armv4_mpu_cache_off | |
690 | W(b) __armv4_mpu_cache_flush | |
10c2df65 | 691 | |
af3e4fd3 MG |
692 | .word 0x41069260 @ ARM926EJ-S (v5TEJ) |
693 | .word 0xff0ffff0 | |
694 | b __arm926ejs_mmu_cache_on | |
695 | b __armv4_mmu_cache_off | |
696 | b __armv5tej_mmu_cache_flush | |
10c2df65 | 697 | |
1da177e4 LT |
698 | .word 0x00007000 @ ARM7 IDs |
699 | .word 0x0000f000 | |
700 | mov pc, lr | |
0e056f20 | 701 | THUMB( nop ) |
1da177e4 | 702 | mov pc, lr |
0e056f20 | 703 | THUMB( nop ) |
1da177e4 | 704 | mov pc, lr |
0e056f20 | 705 | THUMB( nop ) |
1da177e4 LT |
706 | |
707 | @ Everything from here on will be the new ID system. | |
708 | ||
709 | .word 0x4401a100 @ sa110 / sa1100 | |
710 | .word 0xffffffe0 | |
0e056f20 CM |
711 | W(b) __armv4_mmu_cache_on |
712 | W(b) __armv4_mmu_cache_off | |
713 | W(b) __armv4_mmu_cache_flush | |
1da177e4 LT |
714 | |
715 | .word 0x6901b110 @ sa1110 | |
716 | .word 0xfffffff0 | |
0e056f20 CM |
717 | W(b) __armv4_mmu_cache_on |
718 | W(b) __armv4_mmu_cache_off | |
719 | W(b) __armv4_mmu_cache_flush | |
1da177e4 | 720 | |
4157d317 HZ |
721 | .word 0x56056900 |
722 | .word 0xffffff00 @ PXA9xx | |
0e056f20 CM |
723 | W(b) __armv4_mmu_cache_on |
724 | W(b) __armv4_mmu_cache_off | |
725 | W(b) __armv4_mmu_cache_flush | |
49cbe786 EM |
726 | |
727 | .word 0x56158000 @ PXA168 | |
728 | .word 0xfffff000 | |
0e056f20 CM |
729 | W(b) __armv4_mmu_cache_on |
730 | W(b) __armv4_mmu_cache_off | |
731 | W(b) __armv5tej_mmu_cache_flush | |
49cbe786 | 732 | |
2e2023fe NP |
733 | .word 0x56050000 @ Feroceon |
734 | .word 0xff0f0000 | |
0e056f20 CM |
735 | W(b) __armv4_mmu_cache_on |
736 | W(b) __armv4_mmu_cache_off | |
737 | W(b) __armv5tej_mmu_cache_flush | |
3ebb5a2b | 738 | |
5587931c JS |
739 | #ifdef CONFIG_CPU_FEROCEON_OLD_ID |
740 | /* this conflicts with the standard ARMv5TE entry */ | |
741 | .long 0x41009260 @ Old Feroceon | |
742 | .long 0xff00fff0 | |
743 | b __armv4_mmu_cache_on | |
744 | b __armv4_mmu_cache_off | |
745 | b __armv5tej_mmu_cache_flush | |
746 | #endif | |
747 | ||
28853ac8 PZ |
748 | .word 0x66015261 @ FA526 |
749 | .word 0xff01fff1 | |
0e056f20 CM |
750 | W(b) __fa526_cache_on |
751 | W(b) __armv4_mmu_cache_off | |
752 | W(b) __fa526_cache_flush | |
28853ac8 | 753 | |
1da177e4 LT |
754 | @ These match on the architecture ID |
755 | ||
756 | .word 0x00020000 @ ARMv4T | |
757 | .word 0x000f0000 | |
0e056f20 CM |
758 | W(b) __armv4_mmu_cache_on |
759 | W(b) __armv4_mmu_cache_off | |
760 | W(b) __armv4_mmu_cache_flush | |
1da177e4 LT |
761 | |
762 | .word 0x00050000 @ ARMv5TE | |
763 | .word 0x000f0000 | |
0e056f20 CM |
764 | W(b) __armv4_mmu_cache_on |
765 | W(b) __armv4_mmu_cache_off | |
766 | W(b) __armv4_mmu_cache_flush | |
1da177e4 LT |
767 | |
768 | .word 0x00060000 @ ARMv5TEJ | |
769 | .word 0x000f0000 | |
0e056f20 CM |
770 | W(b) __armv4_mmu_cache_on |
771 | W(b) __armv4_mmu_cache_off | |
75216859 | 772 | W(b) __armv5tej_mmu_cache_flush |
1da177e4 | 773 | |
45a7b9cf | 774 | .word 0x0007b000 @ ARMv6 |
7d09e854 | 775 | .word 0x000ff000 |
0e056f20 CM |
776 | W(b) __armv4_mmu_cache_on |
777 | W(b) __armv4_mmu_cache_off | |
778 | W(b) __armv6_mmu_cache_flush | |
1da177e4 | 779 | |
7d09e854 CM |
780 | .word 0x000f0000 @ new CPU Id |
781 | .word 0x000f0000 | |
0e056f20 CM |
782 | W(b) __armv7_mmu_cache_on |
783 | W(b) __armv7_mmu_cache_off | |
784 | W(b) __armv7_mmu_cache_flush | |
7d09e854 | 785 | |
1da177e4 LT |
786 | .word 0 @ unrecognised type |
787 | .word 0 | |
788 | mov pc, lr | |
0e056f20 | 789 | THUMB( nop ) |
1da177e4 | 790 | mov pc, lr |
0e056f20 | 791 | THUMB( nop ) |
1da177e4 | 792 | mov pc, lr |
0e056f20 | 793 | THUMB( nop ) |
1da177e4 LT |
794 | |
795 | .size proc_types, . - proc_types | |
796 | ||
797 | /* | |
798 | * Turn off the Cache and MMU. ARMv3 does not support | |
799 | * reading the control register, but ARMv4 does. | |
800 | * | |
21b2841d UKK |
801 | * On exit, |
802 | * r0, r1, r2, r3, r9, r12 corrupted | |
803 | * This routine must preserve: | |
6d7d0ae5 | 804 | * r4, r7, r8 |
1da177e4 LT |
805 | */ |
806 | .align 5 | |
807 | cache_off: mov r3, #12 @ cache_off function | |
808 | b call_cache_fn | |
809 | ||
10c2df65 HC |
810 | __armv4_mpu_cache_off: |
811 | mrc p15, 0, r0, c1, c0 | |
812 | bic r0, r0, #0x000d | |
813 | mcr p15, 0, r0, c1, c0 @ turn MPU and cache off | |
814 | mov r0, #0 | |
815 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | |
816 | mcr p15, 0, r0, c7, c6, 0 @ flush D-Cache | |
817 | mcr p15, 0, r0, c7, c5, 0 @ flush I-Cache | |
818 | mov pc, lr | |
819 | ||
820 | __armv3_mpu_cache_off: | |
821 | mrc p15, 0, r0, c1, c0 | |
822 | bic r0, r0, #0x000d | |
823 | mcr p15, 0, r0, c1, c0, 0 @ turn MPU and cache off | |
824 | mov r0, #0 | |
825 | mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 | |
826 | mov pc, lr | |
827 | ||
c76b6b41 | 828 | __armv4_mmu_cache_off: |
8bdca0ac | 829 | #ifdef CONFIG_MMU |
1da177e4 LT |
830 | mrc p15, 0, r0, c1, c0 |
831 | bic r0, r0, #0x000d | |
832 | mcr p15, 0, r0, c1, c0 @ turn MMU and cache off | |
833 | mov r0, #0 | |
834 | mcr p15, 0, r0, c7, c7 @ invalidate whole cache v4 | |
835 | mcr p15, 0, r0, c8, c7 @ invalidate whole TLB v4 | |
8bdca0ac | 836 | #endif |
1da177e4 LT |
837 | mov pc, lr |
838 | ||
7d09e854 CM |
839 | __armv7_mmu_cache_off: |
840 | mrc p15, 0, r0, c1, c0 | |
8bdca0ac | 841 | #ifdef CONFIG_MMU |
7d09e854 | 842 | bic r0, r0, #0x000d |
8bdca0ac CM |
843 | #else |
844 | bic r0, r0, #0x000c | |
845 | #endif | |
7d09e854 CM |
846 | mcr p15, 0, r0, c1, c0 @ turn MMU and cache off |
847 | mov r12, lr | |
848 | bl __armv7_mmu_cache_flush | |
849 | mov r0, #0 | |
8bdca0ac | 850 | #ifdef CONFIG_MMU |
7d09e854 | 851 | mcr p15, 0, r0, c8, c7, 0 @ invalidate whole TLB |
8bdca0ac | 852 | #endif |
c30c2f99 CM |
853 | mcr p15, 0, r0, c7, c5, 6 @ invalidate BTC |
854 | mcr p15, 0, r0, c7, c10, 4 @ DSB | |
855 | mcr p15, 0, r0, c7, c5, 4 @ ISB | |
7d09e854 CM |
856 | mov pc, r12 |
857 | ||
c76b6b41 | 858 | __arm6_mmu_cache_off: |
1da177e4 | 859 | mov r0, #0x00000030 @ ARM6 control reg. |
c76b6b41 | 860 | b __armv3_mmu_cache_off |
1da177e4 | 861 | |
c76b6b41 | 862 | __arm7_mmu_cache_off: |
1da177e4 | 863 | mov r0, #0x00000070 @ ARM7 control reg. |
c76b6b41 | 864 | b __armv3_mmu_cache_off |
1da177e4 | 865 | |
c76b6b41 | 866 | __armv3_mmu_cache_off: |
1da177e4 LT |
867 | mcr p15, 0, r0, c1, c0, 0 @ turn MMU and cache off |
868 | mov r0, #0 | |
869 | mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 | |
870 | mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3 | |
871 | mov pc, lr | |
872 | ||
873 | /* | |
874 | * Clean and flush the cache to maintain consistency. | |
875 | * | |
1da177e4 | 876 | * On exit, |
21b2841d | 877 | * r1, r2, r3, r9, r10, r11, r12 corrupted |
1da177e4 | 878 | * This routine must preserve: |
6d7d0ae5 | 879 | * r4, r6, r7, r8 |
1da177e4 LT |
880 | */ |
881 | .align 5 | |
882 | cache_clean_flush: | |
883 | mov r3, #16 | |
884 | b call_cache_fn | |
885 | ||
/*
 * __armv4_mpu_cache_flush: clean & invalidate the D-cache of an ARMv4
 * MPU core by walking all 8 segments x 64 entries with the
 * "clean & invalidate D index" op, then (r2 != 0) invalidate the
 * I-cache and drain the write buffer.  Clobbers r1, r2, r3.
 */
__armv4_mpu_cache_flush:
		mov	r2, #1			@ non-zero: also invalidate I-cache below
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr
/*
 * __fa526_cache_flush: FA526 core -- clean+invalidate the D-cache,
 * invalidate the I-cache and drain the write buffer.  Clobbers r1.
 */
__fa526_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
/*
 * __armv6_mmu_cache_flush: ARMv6 -- clean+invalidate D, invalidate
 * I+BTB, clean+invalidate the unified cache, drain the write buffer.
 * Clobbers r1.
 */
__armv6_mmu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
/*
 * __armv7_mmu_cache_flush: clean+invalidate the data caches and
 * invalidate I+BTB on ARMv7.  If ID_MMFR1 shows no hierarchical cache
 * support, a single "clean+invalidate D" op is used; otherwise every
 * set/way of every data/unified cache level up to the Level of
 * Coherency (from CLIDR/CCSIDR) is cleaned & invalidated.
 * Corrupts r10 (others saved/restored around the set/way walk).
 */
__armv7_mmu_cache_flush:
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}	@ preserve everything the walk uses
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask of the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr&csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum number on the way size
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract max number of the index size
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr
/*
 * __armv5tej_mmu_cache_flush: loop the ARMv5TEJ "test, clean and
 * invalidate D cache" op (it sets the condition flags; NE while dirty
 * lines remain), then invalidate the I-cache and drain the write buffer.
 */
__armv5tej_mmu_cache_flush:
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b			@ repeat until the whole D-cache is clean
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr
/*
 * __armv4_mmu_cache_flush: software D-cache flush for ARMv4 cores --
 * read a cache-sized, cache-line-stepped region of code space so every
 * D-cache line is evicted, then invalidate I and D caches and drain
 * the write buffer.  Cache size/line size come from the cache type
 * register when present, else defaults are used.
 * NOTE(review): r9 appears to hold a value set up by the caller so the
 * teq detects an absent cache ID register -- TODO confirm against the
 * rest of head.S.
 */
__armv4_mmu_cache_flush:
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:
		mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2
1:
 ARM(		ldr	r3, [r1], r11	)	@ s/w flush D cache
 THUMB(		ldr	r3, [r1]	)	@ s/w flush D cache
 THUMB(		add	r1, r1, r11	)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
/*
 * ARMv3 (MMU or MPU): a single op invalidates the whole cache; there
 * is nothing to clean on these cores.  Clobbers r1.
 */
__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr
/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
		.align	2
		.type	phexbuf,#object
phexbuf:	.space	12			@ scratch buffer filled by phex, printed by puts
		.size	phexbuf, . - phexbuf
@ phex: print r0 as an r1-digit hex number (digits built backwards in
@ phexbuf, NUL-terminated, then emitted via puts).
@ phex corrupts {r0, r1, r2, r3}
phex:		adr	r3, phexbuf
		mov	r2, #0
		strb	r2, [r3, r1]		@ terminate the string at digit count r1
1:		subs	r1, r1, #1
		movmi	r0, r3			@ all digits stored: point r0 at the buffer
		bmi	puts			@ ...and tail-call puts to print it
		and	r2, r0, #15		@ low nibble -> ASCII hex digit
		mov	r0, r0, lsr #4
		cmp	r2, #10
		addge	r2, r2, #7		@ 'A'-'F' adjustment for digits >= 10
		add	r2, r2, #'0'
		strb	r2, [r3, r1]
		b	1b
@ puts: print the NUL-terminated string at r0 through the debug UART
@ (loadsp/writeb macros from the DEBUG section at the top of the file),
@ translating '\n' to '\n\r'.  putc branches into label 2 below with
@ r0 = 0 so the "teq r0, #0" ends the loop after a single character.
@ puts corrupts {r0, r1, r2, r3}
puts:		loadsp	r3, r1
1:		ldrb	r2, [r0], #1
		teq	r2, #0			@ stop at the terminating NUL
		moveq	pc, lr
2:		writeb	r2, r3
		mov	r1, #0x00020000		@ crude fixed busy-wait after each char
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'		@ send CR after every LF
		beq	2b
		teq	r0, #0			@ non-zero for string mode, 0 when
		bne	1b			@ entered from putc (single char)
		mov	pc, lr
@ putc: print the single character in r0 by entering puts' output loop
@ (label 2 above) with r0 = 0 so the loop terminates after one char.
@ putc corrupts {r0, r1, r2, r3}
putc:
		mov	r2, r0			@ character to emit
		mov	r0, #0			@ makes puts' "teq r0, #0" exit after it
		loadsp	r3, r1
		b	2b
@ memdump: hex-dump 64 words starting at r0 (8 words per line, each
@ line prefixed with its address, extra space after every 4th word).
@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:	mov	r12, r0			@ r12 = base address
		mov	r10, lr			@ save return address (bl clobbers lr)
		mov	r11, #0			@ r11 = word index
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex			@ print address of this line
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex			@ print one word
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '		@ extra gap mid-line
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7			@ 8 words per line
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64		@ dump 64 words in total
		blt	2b
		mov	pc, r10
#endif
1099 | ||
92c83ff1 | 1100 | .ltorg |
adcc2591 | 1101 | reloc_code_end: |
1da177e4 LT |
1102 | |
1103 | .align | |
b0c4d4ee | 1104 | .section ".stack", "aw", %nobits |
8d7e4cc2 NP |
1105 | .L_user_stack: .space 4096 |
1106 | .L_user_stack_end: |