/*
 * arch/ppc/kernel/misc.S
 *
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_8xx
#define ISYNC_8xx isync
#else
#define ISYNC_8xx
#endif
	.text

	.align	5
_GLOBAL(__delay)
	cmpwi	0,r3,0
	mtctr	r3
	beqlr
1:	bdnz	1b
	blr

/*
 * Returns (address we're running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.
 */
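/*
 * How it works: the relative "bl 1f" loads the run-time address of
 * label 1 into LR, while lis/addi with 1b@ha/1b@l produce the address
 * label 1 was linked at; subtracting the two gives the relocation
 * offset, and LR is then restored from r0.
 */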
_GLOBAL(reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r3
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r3,r4,r3
	mtlr	r0
	blr

/*
 * add_reloc_offset(x) returns x + reloc_offset().
 */
_GLOBAL(add_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	add	r3,r3,r5
	mtlr	r0
	blr

/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	subf	r3,r5,r3
	mtlr	r0
	blr

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
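/*
 * Roughly equivalent C, as an illustrative sketch only (the offset to
 * apply arrives in r3; __got2_start/__got2_end are linker symbols):
 *
 *	for (unsigned long *p = __got2_start; p < __got2_end; p++)
 *		*p += offset;
 */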
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 *   r3 = data offset
 *   r4 = ptr to CPU spec (relocated)
 */
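/*
 * Illustrative sketch of the call being made (hedged; the setup hook is
 * whatever function pointer sits at CPU_SPEC_SETUP in cur_cpu_spec):
 *
 *	if (setup = cur_cpu_spec->setup_func)
 *		setup(offset, cur_cpu_spec);
 *
 * All addresses are adjusted by the data offset in r3 because this can
 * run before the kernel is relocated to KERNELBASE.
 */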
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr

/*
 * complement mask on the msr then "or" some values on.
 *	_nmask_and_or_msr(nmask, value_to_or)
 */
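/*
 * In C-like terms, roughly (illustrative only):
 *
 *	msr = (mfmsr() & ~nmask) | value_to_or;
 *	mtmsr(msr);
 */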
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */


/*
 * Flush MMU TLB
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
	sync			/* Flush to memory before changing mapping */
	tlbia
	isync			/* Flush shadow TLB */
#elif defined(CONFIG_44x)
	li	r3,0
	sync

	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)

1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
	addi	r3,r3,1
	cmpw	0,r3,r5
	ble	1b

	isync
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	li	r3, 0x04
	tlbivax	0,3
	/* Invalidate all entries in TLB1 */
	li	r3, 0x0c
	tlbivax	0,3
	/* Invalidate all entries in TLB2 */
	li	r3, 0x14
	tlbivax	0,3
	/* Invalidate all entries in TLB3 */
	li	r3, 0x1c
	tlbivax	0,3
	msync
#ifdef CONFIG_SMP
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,10
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
	blr

/*
 * Flush MMU TLB for a particular address
 */
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
	tlbsx.	r3, 0, r3
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
	 * the TLB entry. */
	tlbwe	r3, r3, TLB_TAG
	isync
10:
#elif defined(CONFIG_44x)
	mfspr	r4,SPRN_MMUCR
	mfspr	r5,SPRN_PID		/* Get PID */
	rlwimi	r4,r5,0,24,31		/* Set TID */

	/* We have to run the search with interrupts disabled, even critical
	 * and debug interrupts (in fact the only critical exceptions we have
	 * are debug and machine check).  Otherwise an interrupt which causes
	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
	mfmsr	r5
	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
	andc	r6,r5,r6
	mtmsr	r6
	mtspr	SPRN_MMUCR,r4
	tlbsx.	r3, 0, r3
	mtmsr	r5
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64,
	 * which means bit 22 is clear.  Since 22 is
	 * the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
	isync
10:
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm	r4, r3, 0, 0, 19
	ori	r5, r4, 0x08	/* TLBSEL = 1 */
	ori	r6, r4, 0x10	/* TLBSEL = 2 */
	ori	r7, r4, 0x18	/* TLBSEL = 3 */
	tlbivax	0, r4
	tlbivax	0, r5
	tlbivax	0, r6
	tlbivax	0, r7
	msync
#if defined(CONFIG_SMP)
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,11
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
	blr

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
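/*
 * The mechanism is platform specific: 8xx uses the IC_CST invalidate-all
 * command, 4xx uses iccci, FSL BookE sets the flash-invalidate bits in
 * L1CSR0/L1CSR1, and classic 6xx parts toggle HID0[ICFI].  The 601 has
 * a unified cache, so nothing needs to be done there.
 */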
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * __flush_icache_range(unsigned long start, unsigned long stop)
 */
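/*
 * Roughly equivalent C, as an illustrative sketch only:
 *
 *	start &= ~(L1_CACHE_BYTES - 1);
 *	for (addr = start; addr < stop; addr += L1_CACHE_BYTES)
 *		dcbst(addr);		(push dirty data out to memory)
 *	sync();
 *	for (addr = start; addr < stop; addr += L1_CACHE_BYTES)
 *		icbi(addr);		(invalidate stale icache lines)
 *	sync(); isync();
 */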
_GLOBAL(__flush_icache_range)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
	sync				/* additional sync needed on g4 */
	isync
	blr
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
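/*
 * Note on the three range primitives: dcbst cleans a line (writes it
 * back and keeps it valid), dcbf cleans and invalidates, and dcbi
 * invalidates without writing back, discarding any dirty data in the
 * range.
 */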
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to get to ram */
	blr

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * 40x cores have 8K or 16K dcache and 32 byte line size.
 * 44x has a 32K dcache and 32 byte line size.
 * 8xx has 1, 2, 4, 8K variants.
 * For now, cover the worst case of the 44x.
 * Must be called with external interrupts disabled.
 */
#define CACHE_NWAYS	64
#define CACHE_NLINES	16

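/*
 * Sizing note: 2 * CACHE_NWAYS * CACHE_NLINES = 2048 loads, and with a
 * 32-byte line (L1_CACHE_BYTES) the loop below touches 64 KB, twice the
 * 32 KB 44x dcache, which is enough to displace every dirty line.
 */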
_GLOBAL(flush_dcache_all)
	li	r4, (2 * CACHE_NWAYS * CACHE_NLINES)
	mtctr	r4
	lis	r5, KERNELBASE@h
1:	lwz	r3, 0(r5)		/* Load one word from every line */
	addi	r5, r5, L1_CACHE_BYTES
	bdnz	1b
	blr
#endif /* CONFIG_NOT_COHERENT_CACHE */

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 * void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
	blr

/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 * void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26		/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10			/* restore DR */
	isync
	blr

/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order);
 */
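/*
 * Roughly equivalent C, as an illustrative sketch only (dcbz zeroes a
 * whole cache line at a time; 8xx uses plain stores instead):
 *
 *	for (n = (4096 << order) / L1_CACHE_BYTES; n; n--, page += L1_CACHE_BYTES)
 *		dcbz(page);
 */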
_GLOBAL(clear_pages)
	li	r0,4096/L1_CACHE_BYTES
	slw	r0,r0,r4
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3
#endif
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	blr

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)
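/*
 * COPY_16_BYTES moves one 16-byte chunk; the trailing lwzu/stwu update
 * r4 and r3 by 16, so the offsets 4..16 are always relative to the last
 * chunk copied (which is why copy_page first biases both pointers by -4).
 */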

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

#ifdef CONFIG_8xx
	/* don't use prefetch on 8xx */
	li	r0,4096/L1_CACHE_BYTES
	mtctr	r0
1:	COPY_16_BYTES
	bdnz	1b
	blr

#else	/* not 8xx, we can prefetch */
	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b
#endif	/* CONFIG_8xx */

/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
 */
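/*
 * Both are standard lwarx/stwcx. retry loops.  In C terms, roughly
 * (illustrative only): atomically do *addr &= ~mask (clear) or
 * *addr |= mask (set), retrying until the store-conditional succeeds.
 */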
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4
	andc	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4
	or	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr

/*
 * I/O string operations
 *
 * insb(port, buf, len)
 * outsb(port, buf, len)
 * insw(port, buf, len)
 * outsw(port, buf, len)
 * insl(port, buf, len)
 * outsl(port, buf, len)
 * insw_ns(port, buf, len)
 * outsw_ns(port, buf, len)
 * insl_ns(port, buf, len)
 * outsl_ns(port, buf, len)
 *
 * The *_ns versions don't do byte-swapping.
 */
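/*
 * Each routine below is the same pattern at a different access width:
 * roughly, load one item from the port address and store it into the
 * buffer (or the reverse for the "out" direction), len times, with an
 * eieio between accesses to keep the I/O ordering.  The .fixup and
 * __ex_table entries make a faulting access simply return instead of
 * oopsing.
 */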
_GLOBAL(_insb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbz	r5,0(r3)
01:	eieio
02:	stbu	r5,1(r4)
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align 2
	.long 00b, 03b
	.long 01b, 03b
	.long 02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_outsb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbzu	r5,1(r4)
01:	stb	r5,0(r3)
02:	eieio
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align 2
	.long 00b, 03b
	.long 01b, 03b
	.long 02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_insw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhz	r5,0(r3)
01:	eieio
02:	sthu	r5,2(r4)
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align 2
	.long 00b, 03b
	.long 01b, 03b
	.long 02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_outsw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhzu	r5,2(r4)
01:	sth	r5,0(r3)
02:	eieio
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align 2
	.long 00b, 03b
	.long 01b, 03b
	.long 02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_insl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwz	r5,0(r3)
01:	eieio
02:	stwu	r5,4(r4)
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align 2
	.long 00b, 03b
	.long 01b, 03b
	.long 02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_outsl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwzu	r5,4(r4)
01:	stw	r5,0(r3)
02:	eieio
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align 2
	.long 00b, 03b
	.long 01b, 03b
	.long 02b, 03b
	.text
	bdnz	00b
	blr

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5    has shift count
 * result in R3/R4
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
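/*
 * Illustrative C sketch (not part of the build): treating R3 as the
 * high word (MSW) and R4 as the low word (LSW) of a 64-bit value,
 * each routine builds the shifted result from the per-word shifts,
 * e.g. for __lshrdi3:
 *
 *	LSW = (LSW >> count) | (MSW << (32 - count)) | (MSW >> (count - 32));
 *	MSW = MSW >> count;
 *
 * where the individual PowerPC shifts naturally yield 0 whenever their
 * effective shift amount falls outside the 0..31 range, so both the
 * count < 32 and count >= 32 cases come out right without branches.
 */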
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(abs)
	srawi	r4,r3,31
	xor	r3,r3,r4
	sub	r3,r3,r4
	blr

_GLOBAL(_get_SP)
	mr	r3,r1		/* Close enough */
	blr

/*
 * Create a kernel thread
 *   kernel_thread(fn, arg, flags)
 */
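/*
 * Illustrative sketch of what this does (in C-like terms only): it
 * issues a clone(flags | CLONE_VM | CLONE_UNTRACED, 0) system call
 * directly; in the child it builds an empty stack frame, calls fn(arg),
 * and invokes exit() if fn ever returns, while the parent just returns
 * the result of clone().
 */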
_GLOBAL(kernel_thread)
	stwu	r1,-16(r1)
	stw	r30,8(r1)
	stw	r31,12(r1)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	cmpwi	0,r3,0		/* parent or child? */
	bne	1f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	stwu	r0,-16(r1)
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	PPC440EP_ERR42
	blrl
	li	r0,__NR_exit	/* exit if function returns */
	li	r3,0
	sc
1:	lwz	r30,8(r1)
	lwz	r31,12(r1)
	addi	r1,r1,16
	blr

_GLOBAL(kernel_execve)
	li	r0,__NR_execve
	sc
	bnslr
	neg	r3,r3
	blr

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr