Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* clear_page.S: UltraSparc optimized clear page. |
2 | * | |
3 | * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com) | |
4 | * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com) | |
5 | */ | |
6 | ||
7 | #include <asm/visasm.h> | |
8 | #include <asm/thread_info.h> | |
9 | #include <asm/page.h> | |
10 | #include <asm/pgtable.h> | |
11 | #include <asm/spitfire.h> | |
4da808c3 | 12 | #include <asm/head.h> |
d3867f04 | 13 | #include <asm/export.h> |
1da177e4 LT |
14 | |
15 | /* What we used to do was lock a TLB entry into a specific | |
16 | * TLB slot, clear the page with interrupts disabled, then | |
17 | * restore the original TLB entry. This was great for | |
18 | * disturbing the TLB as little as possible, but it meant | |
19 | * we had to keep interrupts disabled for a long time. | |
20 | * | |
21 | * Now, we simply use the normal TLB loading mechanism, | |
22 | * and this makes the cpu choose a slot all by itself. | |
23 | * Then we do a normal TLB flush on exit. We need only | |
24 | * disable preemption during the clear. | |
25 | */ | |
26 | ||
1da177e4 LT |
27 | .text |
28 | ||
29 | .globl _clear_page | |
d3867f04 | 30 | EXPORT_SYMBOL(_clear_page) |
1da177e4 LT |
31 | _clear_page: /* %o0=dest; kernel-linear page, so no alias mapping needed */ |
32 | ba,pt %xcc, clear_page_common ! share the VIS block-clear loop | |
33 | clr %o4 ! (delay slot) %o4=0: skip TLB demap/preempt restore in common code | |
34 | ||
35 | /* This thing is pretty important, it shows up | |
36 | * on the profiles via do_anonymous_page(). | |
37 | */ | |
38 | .align 32 | |
39 | .globl clear_user_page | |
d3867f04 | 40 | EXPORT_SYMBOL(clear_user_page) |
1da177e4 LT |
41 | clear_user_page: /* %o0=dest, %o1=vaddr (user address, used only for its alias colour) */ |
42 | lduw [%g6 + TI_PRE_COUNT], %o2 ! %o2 = saved preempt count, restored after the clear | |
b2d43834 | 43 | sethi %hi(PAGE_OFFSET), %g2 |
1da177e4 LT |
44 | sethi %hi(PAGE_SIZE), %o4 ! mask to pick the D-cache alias bit out of vaddr |
45 | ||
b2d43834 | 46 | ldx [%g2 + %lo(PAGE_OFFSET)], %g2 ! %g2 = PAGE_OFFSET (runtime variable on sparc64) |
c4bce90e | 47 | sethi %hi(PAGE_KERNEL_LOCKED), %g3 |
1da177e4 | 48 | |
c4bce90e | 49 | ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3 ! %g3 = pte protection bits for a locked kernel mapping |
1da177e4 LT |
50 | sub %o0, %g2, %g1 ! paddr |
51 | ||
1da177e4 LT |
52 | and %o1, %o4, %o0 ! vaddr D-cache alias bit |
53 | ||
54 | or %g1, %g3, %g1 ! TTE data | |
55 | sethi %hi(TLBTEMP_BASE), %o3 ! scratch VA window for temporary mappings | |
56 | ||
57 | add %o2, 1, %o4 ! bumped preempt count | |
58 | add %o0, %o3, %o0 ! TTE vaddr | |
59 | ||
60 | /* Disable preemption. */ | |
61 | mov TLB_TAG_ACCESS, %g3 | |
62 | stw %o4, [%g6 + TI_PRE_COUNT] | |
63 | ||
64 | /* Load TLB entry.  Interrupts must be off so nothing observes the
 * half-installed translation; the cpu picks the DTLB slot itself. */ | |
65 | rdpr %pstate, %o4 | |
66 | wrpr %o4, PSTATE_IE, %pstate ! interrupts off | |
67 | stxa %o0, [%g3] ASI_DMMU ! set TLB tag = TTE vaddr | |
68 | stxa %g1, [%g0] ASI_DTLB_DATA_IN ! install TTE data; cpu chooses the slot | |
4da808c3 DM |
69 | sethi %hi(KERNBASE), %g1 |
70 | flush %g1 ! synchronize with the ASI stores above |
1da177e4 LT |
71 | wrpr %o4, 0x0, %pstate ! interrupts back on |
72 | ||
73 | mov 1, %o4 ! %o4=1: common code must demap + restore preempt count | |
74 | ||
75 | clear_page_common: /* %o0=dest vaddr, %o2=saved preempt count, %o4=cleanup flag */ | |
76 | VISEntryHalf ! borrow the lower FP register file | |
77 | membar #StoreLoad | #StoreStore | #LoadStore | |
78 | fzero %f0 | |
79 | sethi %hi(PAGE_SIZE/64), %o1 ! loop count = page size in 64-byte blocks | |
80 | mov %o0, %g1 ! remember vaddr for tlbflush | |
81 | fzero %f2 | |
82 | or %o1, %lo(PAGE_SIZE/64), %o1 | |
83 | faddd %f0, %f2, %f4 ! 0+0 and 0*0 both yield 0: fill %f4..%f14 | |
84 | fmuld %f0, %f2, %f6 ! with zeros (alternating add/mul FP ops) | |
85 | faddd %f0, %f2, %f8 | |
86 | fmuld %f0, %f2, %f10 | |
87 | ||
88 | faddd %f0, %f2, %f12 | |
89 | fmuld %f0, %f2, %f14 | |
90 | 1: stda %f0, [%o0 + %g0] ASI_BLK_P ! block store: %f0-%f14 = 64 zero bytes per iteration | |
91 | subcc %o1, 1, %o1 | |
92 | bne,pt %icc, 1b | |
93 | add %o0, 0x40, %o0 ! (delay slot) advance dest by one block | |
94 | membar #Sync ! drain the block stores before leaving VIS | |
95 | VISExitHalf | |
96 | ||
97 | brz,pn %o4, out ! _clear_page path: no TLB entry was installed | |
98 | nop | |
99 | ||
100 | stxa %g0, [%g1] ASI_DMMU_DEMAP ! flush the temporary TLB entry at %g1 | |
101 | membar #Sync | |
102 | stw %o2, [%g6 + TI_PRE_COUNT] ! restore count -> re-enable preemption | |
103 | ||
104 | out: retl | |
105 | nop | |
106 |