/*
 * This file contains miscellaneous low-level functions.
 *	Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * kexec bits:
 * Copyright (C) 2002-2003 Eric Biederman  <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 * PPC44x port. Copyright (C) 2011, IBM Corporation
 *		Author: Suzuki Poulose <suzuki@in.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/bug.h>
#include <asm/ptrace.h>

	.text

/*
 * We store the saved ksp_limit in the unused part
 * of the STACK_FRAME_OVERHEAD
 */
_GLOBAL(call_do_softirq)
	mflr	r0
	stw	r0,4(r1)
	lwz	r10,THREAD+KSP_LIMIT(r2)
	addi	r11,r3,THREAD_INFO_GAP
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3
	stw	r10,8(r1)
	stw	r11,THREAD+KSP_LIMIT(r2)
	bl	__do_softirq
	lwz	r10,8(r1)
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	stw	r10,THREAD+KSP_LIMIT(r2)
	mtlr	r0
	blr

/*
 * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
 */
_GLOBAL(call_do_irq)
	mflr	r0
	stw	r0,4(r1)
	lwz	r10,THREAD+KSP_LIMIT(r2)
	addi	r11,r4,THREAD_INFO_GAP
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
	mr	r1,r4
	stw	r10,8(r1)
	stw	r11,THREAD+KSP_LIMIT(r2)
	bl	__do_irq
	lwz	r10,8(r1)
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	stw	r10,THREAD+KSP_LIMIT(r2)
	mtlr	r0
	blr

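/*
 * Reference sketch (illustrative, not part of the build): both stubs above
 * switch r1 onto a dedicated interrupt stack, save the old stack pointer and
 * ksp_limit, call the C handler, and then restore them.  Seen from C, the
 * callers look roughly like this (softirq_ctx/hardirq_ctx are assumed to be
 * the usual per-cpu thread_info pointers set up in irq.c):
 *
 *	void call_do_softirq(struct thread_info *ctx);
 *	void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
 *
 *	call_do_softirq(softirq_ctx[smp_processor_id()]);
 *	call_do_irq(regs, hardirq_ctx[smp_processor_id()]);
 */
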
/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
_GLOBAL(mulhdu)
	cmpwi	r6,0
	cmpwi	cr1,r3,0
	mr	r10,r4
	mulhwu	r4,r4,r5
	beq	1f
	mulhwu	r0,r10,r6
	mullw	r7,r10,r5
	addc	r7,r0,r7
	addze	r4,r4
1:	beqlr	cr1		/* all done if high part of A is 0 */
	mullw	r9,r3,r5
	mulhwu	r10,r3,r5
	beq	2f
	mullw	r0,r3,r6
	mulhwu	r8,r3,r6
	addc	r7,r0,r7
	adde	r4,r4,r8
	addze	r10,r10
2:	addc	r4,r4,r9
	addze	r3,r10
	blr

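/*
 * Reference sketch (illustrative, not built): mulhdu computes the high
 * 64 bits of an unsigned 64x64-bit product from 32-bit partial products,
 * i.e. the same value as:
 *
 *	unsigned long long mulhdu(unsigned long long a, unsigned long long b)
 *	{
 *		unsigned int ah = a >> 32, al = a, bh = b >> 32, bl = b;
 *		unsigned long long lo   = (unsigned long long)al * bl;
 *		unsigned long long mid1 = (unsigned long long)ah * bl;
 *		unsigned long long mid2 = (unsigned long long)al * bh;
 *		unsigned long long hi   = (unsigned long long)ah * bh;
 *		unsigned long long carry =
 *			((lo >> 32) + (unsigned int)mid1 + (unsigned int)mid2) >> 32;
 *
 *		return hi + (mid1 >> 32) + (mid2 >> 32) + carry;
 *	}
 *
 * (The assembly takes the operands and result as register pairs, r3/r4 and
 * r5/r6, since this is 32-bit code.)
 */
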
/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	subf	r3,r5,r3
	mtlr	r0
	blr

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr

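/*
 * Reference sketch (illustrative, not built): once the PC-relative fixup has
 * located the .got2 table at its current address, the loop above is simply:
 *
 *	void reloc_got2(unsigned long offset)
 *	{
 *		unsigned long *p     = (unsigned long *)__got2_start;
 *		unsigned long *p_end = (unsigned long *)__got2_end;
 *
 *		while (p < p_end)
 *			*p++ += offset;
 *	}
 */
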
/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 *   r3 = data offset
 *   r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpwi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr

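/*
 * Reference sketch (illustrative, not built): ignoring the relocation-offset
 * arithmetic, the tail call above behaves like:
 *
 *	void call_setup_cpu(unsigned long offset)
 *	{
 *		struct cpu_spec *spec = cur_cpu_spec;
 *
 *		if (spec->cpu_setup)
 *			spec->cpu_setup(offset, spec);
 *	}
 *
 * (cur_cpu_spec and the cpu_setup hook come from asm/cputable.h; the offset
 * handling in the assembly exists because this can run before relocation.)
 */
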
#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on 750fx CPU. This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu).
 */
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from the parameter */
	rlwinm	r4,r4,0,16,14	/* Clear out HID1:PS from the value read */
	or	r4,r4,r5	/* (could have used a single rlwimi instead) */
	mtspr	SPRN_HID1,r4

	/* Store new HID1 image */
	CURRENT_THREAD_INFO(r6, r1)
	lwz	r6,TI_CPU(r6)
	slwi	r6,r6,2
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Return */
	mtmsr	r7
	blr

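/*
 * Reference sketch (illustrative, not built): the HID1 update above replaces
 * the PS bit with the requested PLL selection.  Assuming PS is the 0x00010000
 * bit (which is what the rlwinm masks above select), this is:
 *
 *	unsigned int hid1 = mfspr(SPRN_HID1);
 *
 *	hid1 = (hid1 & ~0x00010000u) | (pll ? 0x00010000u : 0);
 *	mtspr(SPRN_HID1, hid1);
 */
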
_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync

	/* Return */
	mtmsr	r7
	blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */

/*
 * complement mask on the msr then "or" some values on.
 *     _nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */

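/*
 * Reference sketch (illustrative): the routine above computes
 *
 *	msr = (msr & ~nmask) | value_to_or;
 *
 * clearing the bits set in the first argument, then setting those given in
 * the second, with the SYNC/isync fencing required around mtmsr.
 */
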
#ifdef CONFIG_40x

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	lbz	r3,0(r3)
	sync
	mtmsr	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	stb	r3,0(r4)
	sync
	mtmsr	r7
	sync
	isync
	blr

#endif /* CONFIG_40x */

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
#ifndef CONFIG_PPC_8xx
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_4xx */
	isync
	blr
#endif /* CONFIG_PPC_8xx */

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
_KPROBE(flush_icache_range)
BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,31 - L1_CACHE_SHIFT
	subf	r4,r3,r4
	addi	r4,r4,L1_CACHE_BYTES - 1
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
#ifndef CONFIG_44x
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
#else
	/* Flash invalidate on 44x because we are passed kmapped addresses and
	   this doesn't work for userspace pages due to the virtually tagged
	   icache.  Sigh. */
	iccci	0, r0
#endif
	sync				/* additional sync needed on g4 */
	isync
	blr
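
/*
 * Reference sketch (illustrative, not built): the sequence above is the
 * classic writeback-then-invalidate loop over cache lines:
 *
 *	void flush_icache_range(unsigned long start, unsigned long stop)
 *	{
 *		unsigned long p;
 *
 *		start &= ~(L1_CACHE_BYTES - 1UL);
 *		for (p = start; p < stop; p += L1_CACHE_BYTES)
 *			dcbst(p);	// push dirty data to memory
 *		sync();
 *		for (p = start; p < stop; p += L1_CACHE_BYTES)
 *			icbi(p);	// drop stale icache lines
 *		sync();
 *		isync();
 *	}
 *
 * dcbst()/icbi()/sync()/isync() stand in for the instructions used above;
 * they are not real C helpers.
 */
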
/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
#ifdef CONFIG_44x
	/* We don't flush the icache on 44x. Those have a virtual icache
	 * and we don't have access to the virtual address here (it's
	 * not the page vaddr but where it's mapped in user space). The
	 * flushing of the icache on these is handled elsewhere, when
	 * a change in the address space occurs, before returning to
	 * user space
	 */
BEGIN_MMU_FTR_SECTION
	blr
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
#endif /* CONFIG_44x */
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
	blr

#ifndef CONFIG_BOOKE
/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr					/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26			/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10				/* restore DR */
	isync
	blr
#endif /* CONFIG_BOOKE */

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b

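/*
 * Reference sketch (illustrative, not built): per destination cache line the
 * loop above behaves like the following, with dcbz establishing the line in
 * the cache without reading it from memory and dcbt prefetching the source:
 *
 *	void copy_page(void *to, void *from)
 *	{
 *		unsigned char *d = to, *s = from;
 *		int line, chunk;
 *
 *		for (line = 0; line < PAGE_SIZE / L1_CACHE_BYTES; line++) {
 *			dcbt(s + PREFETCH_AHEAD);	// prefetch source
 *			dcbz(d);			// allocate dest line, no read
 *			for (chunk = 0; chunk < L1_CACHE_BYTES / 16; chunk++) {
 *				memcpy(d, s, 16);	// the COPY_16_BYTES macro
 *				d += 16;
 *				s += 16;
 *			}
 *		}
 *	}
 *
 * dcbt()/dcbz()/PREFETCH_AHEAD are stand-ins for the instructions and
 * constants used in the assembly, not real kernel helpers.
 */
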
/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5    has shift count
 * result in R3/R4
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

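/*
 * Reference sketch (illustrative, not built): the same result written in C
 * over the two 32-bit halves, shown here for the logical right shift (the
 * assembly above handles all counts 0-63 without branches):
 *
 *	unsigned long long __lshrdi3(unsigned long long v, int count)
 *	{
 *		unsigned int msw = v >> 32, lsw = v;
 *		unsigned int rm, rl;
 *
 *		if (count == 0) {
 *			rm = msw;
 *			rl = lsw;
 *		} else if (count < 32) {
 *			rm = msw >> count;
 *			rl = (lsw >> count) | (msw << (32 - count));
 *		} else {
 *			rm = 0;
 *			rl = msw >> (count - 32);
 *		}
 *		return ((unsigned long long)rm << 32) | rl;
 *	}
 *
 * __ashrdi3 is the same with arithmetic (sign-propagating) shifts of the
 * MSW, and __ashldi3 is the mirror image for left shifts.
 */
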
/*
 * 64-bit comparison: __cmpdi2(s64 a, s64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
_GLOBAL(__cmpdi2)
	cmpw	r3,r5
	li	r3,1
	bne	1f
	cmplw	r4,r6
	beqlr
1:	li	r3,0
	bltlr
	li	r3,2
	blr
/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
_GLOBAL(__ucmpdi2)
	cmplw	r3,r5
	li	r3,1
	bne	1f
	cmplw	r4,r6
	beqlr
1:	li	r3,0
	bltlr
	li	r3,2
	blr

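/*
 * Reference sketch (illustrative, not built): both helpers implement the
 * usual libgcc contract, e.g. for the unsigned variant:
 *
 *	int __ucmpdi2(unsigned long long a, unsigned long long b)
 *	{
 *		if (a < b)
 *			return 0;
 *		if (a > b)
 *			return 2;
 *		return 1;
 *	}
 *
 * __cmpdi2 is identical except that the values are compared as signed.
 */
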
_GLOBAL(__bswapdi2)
	rotlwi	r9,r4,8
	rotlwi	r10,r3,8
	rlwimi	r9,r4,24,0,7
	rlwimi	r10,r3,24,0,7
	rlwimi	r9,r4,24,16,23
	rlwimi	r10,r3,24,16,23
	mr	r3,r9
	mr	r4,r10
	blr

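/*
 * Reference sketch (illustrative, not built): the rotate/insert sequence
 * above byte-swaps each 32-bit half and exchanges the halves:
 *
 *	unsigned long long __bswapdi2(unsigned long long v)
 *	{
 *		unsigned int hi = v >> 32, lo = v;
 *
 *		return ((unsigned long long)__builtin_bswap32(lo) << 32) |
 *		       __builtin_bswap32(hi);
 *	}
 *
 * (__builtin_bswap32 is used purely for illustration.)
 */
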
#ifdef CONFIG_SMP
_GLOBAL(start_secondary_resume)
	/* Reset stack */
	CURRENT_THREAD_INFO(r1, r1)
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r3,0
	stw	r3,0(r1)	/* Zero the stack frame pointer */
	bl	start_secondary
	b	.
#endif /* CONFIG_SMP */

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr

#ifdef CONFIG_KEXEC
	/*
	 * Must be relocatable PIC code callable as a C function.
	 */
	.globl relocate_new_kernel
relocate_new_kernel:
	/* r3 = page_list   */
	/* r4 = reboot_code_buffer */
	/* r5 = start_address      */

#ifdef CONFIG_FSL_BOOKE

	mr	r29, r3
	mr	r30, r4
	mr	r31, r5

#define ENTRY_MAPPING_KEXEC_SETUP
#include "fsl_booke_entry_mapping.S"
#undef ENTRY_MAPPING_KEXEC_SETUP

	mr	r3, r29
	mr	r4, r30
	mr	r5, r31

	li	r0, 0
#elif defined(CONFIG_44x)

	/* Save our parameters */
	mr	r29, r3
	mr	r30, r4
	mr	r31, r5

#ifdef CONFIG_PPC_47x
	/* Check for 47x cores */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,PVR_476FPE@h
	beq	setup_map_47x
	cmplwi	cr0,r3,PVR_476@h
	beq	setup_map_47x
	cmplwi	cr0,r3,PVR_476_ISS@h
	beq	setup_map_47x
#endif /* CONFIG_PPC_47x */

/*
 * Code for setting up 1:1 mapping for PPC440x for KEXEC
 *
 * We cannot switch off the MMU on PPC44x.
 * So we:
 * 1) Invalidate all the mappings except the one we are running from.
 * 2) Create a tmp mapping for our code in the other address space (TS) and
 *    jump to it.  Invalidate the entry we started in.
 * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in original TS.
 * 4) Jump to the 1:1 mapping in original TS.
 * 5) Invalidate the tmp mapping.
 *
 * - Based on the kexec support code for FSL BookE
 *
 */

	/*
	 * Load the PID with kernel PID (0).
	 * Also load our MSR_IS and TID to MMUCR for TLB search.
	 */
	li	r3, 0
	mtspr	SPRN_PID, r3
	mfmsr	r4
	andi.	r4,r4,MSR_IS@l
	beq	wmmucr
	oris	r3,r3,PPC44x_MMUCR_STS@h
wmmucr:
	mtspr	SPRN_MMUCR,r3
	sync

	/*
	 * Invalidate all the TLB entries except the current entry
	 * where we are running from
	 */
	bl	0f				/* Find our address */
0:	mflr	r5				/* Make it accessible */
	tlbsx	r23,0,r5			/* Find entry we are in */
	li	r4,0				/* Start at TLB entry 0 */
	li	r3,0				/* Set PAGEID inval value */
1:	cmpw	r23,r4				/* Is this our entry? */
	beq	skip				/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
skip:
	addi	r4,r4,1				/* Increment */
	cmpwi	r4,64				/* Are we done? */
	bne	1b				/* If not, repeat */
	isync

	/* Create a temp mapping and jump to it */
	andi.	r6, r23, 1		/* Find the index to use */
	addi	r24, r6, 1		/* r24 will contain 1 or 2 */

	mfmsr	r9			/* get the MSR */
	rlwinm	r5, r9, 27, 31, 31	/* Extract the MSR[IS] */
	xori	r7, r5, 1		/* Use the other address space */

	/* Read the current mapping entries */
	tlbre	r3, r23, PPC44x_TLB_PAGEID
	tlbre	r4, r23, PPC44x_TLB_XLAT
	tlbre	r5, r23, PPC44x_TLB_ATTRIB

	/* Save our current XLAT entry */
	mr	r25, r4

	/* Extract the TLB PageSize */
	li	r10, 1			/* r10 will hold PageSize */
	rlwinm	r11, r3, 0, 24, 27	/* bits 24-27 */

	/* XXX: As of now we use 256M, 4K pages */
	cmpwi	r11, PPC44x_TLB_256M
	bne	tlb_4k
	rotlwi	r10, r10, 28		/* r10 = 256M */
	b	write_out
tlb_4k:
	cmpwi	r11, PPC44x_TLB_4K
	bne	default
	rotlwi	r10, r10, 12		/* r10 = 4K */
	b	write_out
default:
	rotlwi	r10, r10, 10		/* r10 = 1K */

write_out:
	/*
	 * Write out the tmp 1:1 mapping for this code in other address space
	 * Fixup  EPN = RPN , TS=other address space
	 */
	insrwi	r3, r7, 1, 23		/* Bit 23 is TS for PAGEID field */

	/* Write out the tmp mapping entries */
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
	tlbwe	r4, r24, PPC44x_TLB_XLAT
	tlbwe	r5, r24, PPC44x_TLB_ATTRIB

	subi	r11, r10, 1		/* PageOffset Mask = PageSize - 1 */
	not	r10, r11		/* Mask for PageNum */

	/* Switch to other address space in MSR */
	insrwi	r9, r7, 1, 26		/* Set MSR[IS] = r7 */

	bl	1f
1:	mflr	r8
	addi	r8, r8, (2f-1b)		/* Find the target offset */

	/* Jump to the tmp mapping */
	mtspr	SPRN_SRR0, r8
	mtspr	SPRN_SRR1, r9
	rfi

2:
	/* Invalidate the entry we were executing from */
	li	r3, 0
	tlbwe	r3, r23, PPC44x_TLB_PAGEID

	/* attribute fields. rwx for SUPERVISOR mode */
	li	r5, 0
	ori	r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	/* Create 1:1 mapping in 256M pages */
	xori	r7, r7, 1			/* Revert back to Original TS */

	li	r8, 0				/* PageNumber */
	li	r6, 3				/* TLB Index, start at 3 */

next_tlb:
	rotlwi	r3, r8, 28			/* Create EPN (bits 0-3) */
	mr	r4, r3				/* RPN = EPN */
	ori	r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M) /* SIZE = 256M, Valid */
	insrwi	r3, r7, 1, 23			/* Set TS from r7 */

	tlbwe	r3, r6, PPC44x_TLB_PAGEID	/* PageID field : EPN, V, SIZE */
	tlbwe	r4, r6, PPC44x_TLB_XLAT		/* Address translation : RPN */
	tlbwe	r5, r6, PPC44x_TLB_ATTRIB	/* Attributes */

	addi	r8, r8, 1			/* Increment PN */
	addi	r6, r6, 1			/* Increment TLB Index */
	cmpwi	r8, 8				/* Are we done? */
	bne	next_tlb
	isync

	/* Jump to the new mapping 1:1 */
	li	r9,0
	insrwi	r9, r7, 1, 26			/* Set MSR[IS] = r7 */

	bl	1f
1:	mflr	r8
	and	r8, r8, r11			/* Get our offset within page */
	addi	r8, r8, (2f-1b)

	and	r5, r25, r10			/* Get our target PageNum */
	or	r8, r8, r5			/* Target jump address */

	mtspr	SPRN_SRR0, r8
	mtspr	SPRN_SRR1, r9
	rfi
2:
	/* Invalidate the tmp entry we used */
	li	r3, 0
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
	sync
	b	ppc44x_map_done

#ifdef CONFIG_PPC_47x

	/* 1:1 mapping for 47x */

setup_map_47x:

	/*
	 * Load the kernel pid (0) to PID and also to MMUCR[TID].
	 * Also set the MSR IS->MMUCR STS
	 */
	li	r3, 0
	mtspr	SPRN_PID, r3			/* Set PID */
	mfmsr	r4				/* Get MSR */
	andi.	r4, r4, MSR_IS@l		/* TS=1? */
	beq	1f				/* If not, leave STS=0 */
	oris	r3, r3, PPC47x_MMUCR_STS@h	/* Set STS=1 */
1:	mtspr	SPRN_MMUCR, r3			/* Put MMUCR */
	sync

	/* Find the entry we are running from */
	bl	2f
2:	mflr	r23
	tlbsx	r23, 0, r23
	tlbre	r24, r23, 0			/* TLB Word 0 */
	tlbre	r25, r23, 1			/* TLB Word 1 */
	tlbre	r26, r23, 2			/* TLB Word 2 */


	/*
	 * Invalidates all the tlb entries by writing to 256 RPNs (r4)
	 * of 4k page size in all 4 ways (0-3 in r3).
	 * This would invalidate the entire UTLB including the one we are
	 * running from. However the shadow TLB entries would help us
	 * to continue the execution, until we flush them (rfi/isync).
	 */
	addis	r3, 0, 0x8000			/* specify the way */
	addi	r4, 0, 0			/* TLB Word0 = (EPN=0, VALID = 0) */
	addi	r5, 0, 0
	b	clear_utlb_entry

	/* Align the loop to speed things up. from head_44x.S */
	.align	6

clear_utlb_entry:

	tlbwe	r4, r3, 0
	tlbwe	r5, r3, 1
	tlbwe	r5, r3, 2
	addis	r3, r3, 0x2000			/* Increment the way */
	cmpwi	r3, 0
	bne	clear_utlb_entry
	addis	r3, 0, 0x8000
	addis	r4, r4, 0x100			/* Increment the EPN */
	cmpwi	r4, 0
	bne	clear_utlb_entry

	/* Create the entries in the other address space */
	mfmsr	r5
	rlwinm	r7, r5, 27, 31, 31		/* Get the TS (Bit 26) from MSR */
	xori	r7, r7, 1			/* r7 = !TS */

	insrwi	r24, r7, 1, 21			/* Change the TS in the saved TLB word 0 */

	/*
	 * write out the TLB entries for the tmp mapping
	 * Use way '0' so that we could easily invalidate it later.
	 */
	lis	r3, 0x8000			/* Way '0' */

	tlbwe	r24, r3, 0
	tlbwe	r25, r3, 1
	tlbwe	r26, r3, 2

	/* Update the msr to the new TS */
	insrwi	r5, r7, 1, 26

	bl	1f
1:	mflr	r6
	addi	r6, r6, (2f-1b)

	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r5
	rfi

	/*
	 * Now we are in the tmp address space.
	 * Create a 1:1 mapping for 0-2GiB in the original TS.
	 */
2:
	li	r3, 0
	li	r4, 0				/* TLB Word 0 */
	li	r5, 0				/* TLB Word 1 */
	li	r6, 0
	ori	r6, r6, PPC47x_TLB2_S_RWX	/* TLB word 2 */

	li	r8, 0				/* PageIndex */

	xori	r7, r7, 1			/* revert back to original TS */

write_utlb:
	rotlwi	r5, r8, 28			/* RPN = PageIndex * 256M */
						/* ERPN = 0 as we don't use memory above 2G */

	mr	r4, r5				/* EPN = RPN */
	ori	r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
	insrwi	r4, r7, 1, 21			/* Insert the TS to Word 0 */

	tlbwe	r4, r3, 0			/* Write out the entries */
	tlbwe	r5, r3, 1
	tlbwe	r6, r3, 2
	addi	r8, r8, 1
	cmpwi	r8, 8				/* Have we completed? */
	bne	write_utlb

	/* make sure we complete the TLB write up */
	isync

	/*
	 * Prepare to jump to the 1:1 mapping.
	 * 1) Extract page size of the tmp mapping
	 *    DSIZ = TLB_Word0[22:27]
	 * 2) Calculate the physical address of the address
	 *    to jump to.
	 */
	rlwinm	r10, r24, 0, 22, 27

	cmpwi	r10, PPC47x_TLB0_4K
	bne	0f
	li	r10, 0x1000			/* r10 = 4k */
	bl	1f

0:
	/* Defaults to 256M */
	lis	r10, 0x1000

	bl	1f
1:	mflr	r4
	addi	r4, r4, (2f-1b)			/* virtual address of 2f */

	subi	r11, r10, 1			/* offsetmask = Pagesize - 1 */
	not	r10, r11			/* Pagemask = ~(offsetmask) */

	and	r5, r25, r10			/* Physical page */
	and	r6, r4, r11			/* offset within the current page */

	or	r5, r5, r6			/* Physical address for 2f */

	/* Switch the TS in MSR to the original one */
	mfmsr	r8
	insrwi	r8, r7, 1, 26

	mtspr	SPRN_SRR1, r8
	mtspr	SPRN_SRR0, r5
	rfi

2:
	/* Invalidate the tmp mapping */
	lis	r3, 0x8000			/* Way '0' */

	clrrwi	r24, r24, 12			/* Clear the valid bit */
	tlbwe	r24, r3, 0
	tlbwe	r25, r3, 1
	tlbwe	r26, r3, 2

	/* Make sure we complete the TLB write and flush the shadow TLB */
	isync

#endif

ppc44x_map_done:


	/* Restore the parameters */
	mr	r3, r29
	mr	r4, r30
	mr	r5, r31

	li	r0, 0
#else
	li	r0, 0

	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */

	mr	r8, r0
	ori	r8, r8, MSR_RI|MSR_ME
	mtspr	SPRN_SRR1, r8
	addi	r8, r4, 1f - relocate_new_kernel
	mtspr	SPRN_SRR0, r8
	sync
	rfi

1:
#endif
	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
	stw	r0, 0(r1)

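/*
 * Reference sketch (illustrative, not built): the copy loop below walks the
 * kexec indirection list.  Each 32-bit entry is a page address with flag
 * bits in its low bits (IND_DESTINATION, IND_INDIRECTION, IND_DONE,
 * IND_SOURCE); assuming, as kexec arranges, that the head entry passed in
 * r3 is an indirection pointer, the walk is roughly:
 *
 *	unsigned long entry = head;		// first entry, passed in r3
 *	unsigned long *ind = NULL;
 *	unsigned long dest = 0;
 *
 *	while (!(entry & IND_DONE)) {
 *		unsigned long page = entry & PAGE_MASK;
 *
 *		if (entry & IND_DESTINATION)
 *			dest = page;			// next copy target
 *		else if (entry & IND_INDIRECTION)
 *			ind = (unsigned long *)page;	// switch to a new list page
 *		else if (entry & IND_SOURCE) {
 *			copy_page_and_flush(dest, page);
 *			dest += PAGE_SIZE;
 *		}
 *		entry = *ind++;				// fetch the next entry
 *	}
 *
 * copy_page_and_flush() is a hypothetical stand-in for the dcbst/icbi copy
 * loop at label 9: below, not a real kernel function.
 */
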
	/* Do the copies */
	li	r6, 0 /* checksum */
	mr	r0, r3
	b	1f

0:	/* top, read another word for the indirection page */
	lwzu	r0, 4(r3)

1:
	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31	/* IND_DESTINATION (1<<0) */
	beq	2f

	rlwinm	r8, r0, 0, 0, 19	/* clear kexec flags, page align */
	b	0b

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30	/* IND_INDIRECTION (1<<1) */
	beq	2f

	rlwinm	r3, r0, 0, 0, 19	/* clear kexec flags, page align */
	subi	r3, r3, 4
	b	0b

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29	/* IND_DONE (1<<2) */
	beq	2f
	b	3f

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28	/* IND_SOURCE (1<<3) */
	beq	0b

	rlwinm	r9, r0, 0, 0, 19	/* clear kexec flags, page align */

	li	r7, PAGE_SIZE / 4
	mtctr	r7
	subi	r9, r9, 4
	subi	r8, r8, 4
9:
	lwzu	r0, 4(r9)	/* do the copy */
	xor	r6, r6, r0
	stwu	r0, 4(r8)
	dcbst	0, r8
	sync
	icbi	0, r8
	bdnz	9b

	addi	r9, r9, 4
	addi	r8, r8, 4
	b	0b
3:

	/* To be certain of avoiding problems with self-modifying code
	 * execute a serializing instruction here.
	 */
	isync
	sync

	mfspr	r3, SPRN_PIR /* current core we are running on */
	mr	r4, r5 /* load physical address of chunk called */

	/* jump to the entry point, usually the setup routine */
	mtlr	r5
	blrl

1:	b	1b

relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel
#endif