arch/powerpc/mm/slb_low.S
/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * r3 = faulting address, r13 = PACA
 * r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
	/* r3 = faulting address */

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,28		/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */
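	/* The top four EA bits select the region: below 0xc is a user
	 * address (handled at 0: below), 0xc is the kernel linear mapping
	 * at PAGE_OFFSET, and anything above it is treated as the
	 * vmalloc/ioremap space.  Segments are 256MB, hence ESID = EA >> 28.
	 */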

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the protoVSID 0xfffffffff for the
	 * top segment. That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it. */

	/* Check if hitting the linear mapping of the vmalloc/ioremap
	 * kernel space
	 */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_linear)
	li	r11,0
	b	slb_finish_load

1:	/* vmalloc/ioremap mapping encoding bits, the "li" instruction below
	 * will be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_virtual)
	li	r11,0
	b	slb_finish_load


0:	/* user address: proto-VSID = context << 15 | ESID. First check
	 * if the address is within the boundaries of the user region
	 */
	srdi.	r9,r10,USER_ESID_BITS
	bne-	8f			/* invalid ea bits set */

	/* Figure out if the segment contains huge pages */
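	/* The PACA keeps two bitmaps of segments reserved for huge pages:
	 * PACALOWHTLBAREAS has one bit per 256MB segment below 4GB (hence
	 * the cmpldi against an ESID of 16), PACAHIGHHTLBAREAS one bit per
	 * HTLB_AREA-sized region above that.  If the faulting segment's bit
	 * is set, load the huge-page SLB encoding instead of the normal one.
	 */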
#ifdef CONFIG_HUGETLB_PAGE
BEGIN_FTR_SECTION
	b	1f
END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
	cmpldi	r10,16

	lhz	r9,PACALOWHTLBAREAS(r13)
	mr	r11,r10
	blt	5f

	lhz	r9,PACAHIGHHTLBAREAS(r13)
	srdi	r11,r10,(HTLB_AREA_SHIFT-SID_SHIFT)

5:	srd	r9,r9,r11
	andi.	r9,r9,1
	beq	1f
_GLOBAL(slb_miss_user_load_huge)
	li	r11,0
	b	2f
1:
#endif /* CONFIG_HUGETLB_PAGE */

_GLOBAL(slb_miss_user_load_normal)
	li	r11,0

2:
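	/* Both user paths end up here: r11 holds the (boot-patched) VSID
	 * flags and r10 the ESID.  Fold in the context ID to form the
	 * proto-VSID, i.e. r10 = (context << USER_ESID_BITS) | ESID, which
	 * slb_finish_load below scrambles into the real VSID.
	 */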
	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0
	b	slb_finish_load

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load

#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * r3 = faulting address, r13 = PACA
 * r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables. This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */

	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the pagetables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	beqlr

	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD. In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
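	/* The two rlwinm's below turn the ESID into byte offsets into the
	 * PGD and then the PMD (index * 8, the size of an entry) so the
	 * ldx's can fetch the entries directly; the exact bit ranges
	 * depend on this kernel's page-table geometry.
	 */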
	ld	r9,PACAPGDIR(r13)
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
	cmpldi	cr0,r9,0
	beqlr

	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0

	/* fall through slb_finish_load */

#endif /* __DISABLED__ */


/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
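	/* ASM_VSID_SCRAMBLE turns the proto-VSID into the real VSID by
	 * multiplying it by VSID_MULTIPLIER modulo VSID_MODULUS (2^36 - 1
	 * here), spreading consecutive contexts across the hash table.
	 * The rldimi then builds the VSID dword of the SLB entry:
	 * r11 = (VSID << SLB_VSID_SHIFT) | flags.
	 */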
	ASM_VSID_SCRAMBLE(r10,r9)
	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */

	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
#ifdef CONFIG_PPC_ISERIES
	/*
	 * On iSeries, the "bolted" stack segment can be cast out on
	 * shared processor switch so we need to check for a miss on
	 * it and restore it to the right slot.
	 */
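	/* PACAKSAVE holds the kernel stack pointer; clearing the low 28
	 * bits of it and of the faulting EA compares the two ESIDs.  If
	 * the miss is on the current stack's segment, reuse the last
	 * bolted slot (branch to 3:) instead of a round-robin victim.
	 */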
	ld	r9,PACAKSAVE(r13)
	clrrdi	r9,r9,28
	clrrdi	r3,r3,28
	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
	cmpld	r9,r3
	beq	3f
#endif /* CONFIG_PPC_ISERIES */

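	/* Round-robin victim selection: PACASTABRR holds the next slot to
	 * cast out.  Advance it and wrap back to SLB_NUM_BOLTED rather than
	 * to 0, so the bolted entries in slots 0..SLB_NUM_BOLTED-1 are never
	 * evicted.
	 */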
	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* use a cpu feature mask if we ever change our slb size */
	cmpldi	r10,SLB_NUM_ENTRIES

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)

3:
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r10 = r3 | SLB_ESID_V */

	/* r10 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
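	/* Remember this user segment's ESID in paca->slb_cache so the
	 * context-switch code can flush just these entries instead of the
	 * whole SLB.  When the cache is full, push the pointer past
	 * SLB_CACHE_ENTRIES to signal that a full flush is needed.
	 */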
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:	/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr