raid5: allow arbitrary max_hw_sectors
[deliverable/linux.git] / arch / sparc / lib / clear_page.S
1 /* clear_page.S: UltraSparc optimized clear page.
2 *
3 * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
4 * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
5 */
6
7 #include <asm/visasm.h>
8 #include <asm/thread_info.h>
9 #include <asm/page.h>
10 #include <asm/pgtable.h>
11 #include <asm/spitfire.h>
12 #include <asm/head.h>
13
14 /* What we used to do was lock a TLB entry into a specific
15 * TLB slot, clear the page with interrupts disabled, then
16 * restore the original TLB entry. This was great for
17 * disturbing the TLB as little as possible, but it meant
18 * we had to keep interrupts disabled for a long time.
19 *
20 * Now, we simply use the normal TLB loading mechanism,
21 * and this makes the cpu choose a slot all by itself.
22 * Then we do a normal TLB flush on exit. We need only
23 * disable preemption during the clear.
24 */
25
26 .text
27
/* _clear_page(dest): clear a page through the kernel's own mapping.
 *
 * %o0 = dest (kernel virtual address of the page to zero)
 *
 * Just falls into the shared clearing loop with %o4 = 0.  The common
 * code tests %o4 (brz) on exit: zero means "no temporary TLB entry was
 * loaded and no preempt count needs restoring", so it skips the demap
 * and TI_PRE_COUNT writeback done for clear_user_page.
 * Note the `clr %o4` sits in the branch delay slot and executes before
 * control reaches clear_page_common.
 */
28 .globl _clear_page
29 _clear_page: /* %o0=dest */
30 ba,pt %xcc, clear_page_common
31 clr %o4
32
/* This thing is pretty important, it shows up
 * on the profiles via do_anonymous_page().
 */
36 .align 32
37 .globl clear_user_page
/* clear_user_page(dest, vaddr): clear a page destined for userspace,
 * honouring the D-cache alias colour of the user mapping.
 *
 * %o0 = dest  (kernel address of the page; PAGE_OFFSET-based, so
 *              dest - PAGE_OFFSET is the physical address)
 * %o1 = vaddr (user virtual address the page will appear at; only its
 *              D-cache alias bit, vaddr & PAGE_SIZE, is used)
 *
 * A TTE (paddr | PAGE_KERNEL_LOCKED) is built in %g1 and loaded by
 * hand into the D-TLB for a TLBTEMP_BASE window whose alias bit
 * matches the user vaddr, so the block stores land in the same
 * D-cache colour userspace will read.  Preemption is disabled by
 * incrementing TI_PRE_COUNT directly (old count kept in %o2 for the
 * common code to restore); interrupts are disabled (PSTATE_IE) only
 * around the TLB tag/data stores.  %o4 = 1 on fall-through tells
 * clear_page_common to demap the temporary entry afterwards.
 */
38 clear_user_page: /* %o0=dest, %o1=vaddr */
39 lduw [%g6 + TI_PRE_COUNT], %o2 ! %o2 = saved preempt count
40 sethi %hi(PAGE_OFFSET), %g2
41 sethi %hi(PAGE_SIZE), %o4 ! alias-bit mask
42
43 ldx [%g2 + %lo(PAGE_OFFSET)], %g2
44 sethi %hi(PAGE_KERNEL_LOCKED), %g3
45
46 ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3 ! %g3 = TTE attribute bits
47 sub %o0, %g2, %g1 ! paddr
48
49 and %o1, %o4, %o0 ! vaddr D-cache alias bit
50
51 or %g1, %g3, %g1 ! TTE data
52 sethi %hi(TLBTEMP_BASE), %o3
53
54 add %o2, 1, %o4 ! bumped preempt count
55 add %o0, %o3, %o0 ! TTE vaddr
56
57 /* Disable preemption. */
58 mov TLB_TAG_ACCESS, %g3
59 stw %o4, [%g6 + TI_PRE_COUNT]
60
/* Load TLB entry with interrupts off: write the tag, push the TTE
 * data in, then flush to serialize before re-enabling interrupts.
 */
61 /* Load TLB entry. */
62 rdpr %pstate, %o4
63 wrpr %o4, PSTATE_IE, %pstate ! clear IE bit
64 stxa %o0, [%g3] ASI_DMMU ! TLB_TAG_ACCESS = TTE vaddr
65 stxa %g1, [%g0] ASI_DTLB_DATA_IN ! load TTE data
66 sethi %hi(KERNBASE), %g1
67 flush %g1
68 wrpr %o4, 0x0, %pstate ! restore interrupts
69
70 mov 1, %o4 ! flag: cleanup needed in common code
71
/* Shared clearing loop.
 *
 * %o0 = virtual address to clear (TLBTEMP window or kernel mapping)
 * %o4 = nonzero if a temporary D-TLB entry must be demapped and the
 *       preempt count in %o2 restored on exit (clear_user_page path)
 *
 * Zeroes %f0-%f14 (fzero, then 0+0 / 0*0 fills the rest) and streams
 * them out with 64-byte ASI_BLK_P block stores, PAGE_SIZE/64
 * iterations.  The delay-slot `add` advances the pointer each trip.
 */
72 clear_page_common:
73 VISEntryHalf
74 membar #StoreLoad | #StoreStore | #LoadStore
75 fzero %f0
76 sethi %hi(PAGE_SIZE/64), %o1 ! %o1 = block count
77 mov %o0, %g1 ! remember vaddr for tlbflush
78 fzero %f2
79 or %o1, %lo(PAGE_SIZE/64), %o1
80 faddd %f0, %f2, %f4 ! 0 + 0 -> %f4 = 0
81 fmuld %f0, %f2, %f6 ! 0 * 0 -> %f6 = 0
82 faddd %f0, %f2, %f8
83 fmuld %f0, %f2, %f10
84
85 faddd %f0, %f2, %f12
86 fmuld %f0, %f2, %f14
/* Main loop: one 64-byte block store per iteration. */
87 1: stda %f0, [%o0 + %g0] ASI_BLK_P
88 subcc %o1, 1, %o1
89 bne,pt %icc, 1b
90 add %o0, 0x40, %o0 ! delay slot: advance dest
91 membar #Sync ! block stores complete before exit
92 VISExitHalf
93
/* %o4 == 0: _clear_page path, nothing to tear down. */
94 brz,pn %o4, out
95 nop
96
/* clear_user_page path: demap the temporary TLB entry and
 * restore the saved preempt count (re-enabling preemption).
 */
97 stxa %g0, [%g1] ASI_DMMU_DEMAP
98 membar #Sync
99 stw %o2, [%g6 + TI_PRE_COUNT]
100
101 out: retl
102 nop
103
This page took 0.03605 seconds and 5 git commands to generate.