/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * r3 = faulting address, r13 = PACA
 * r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
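/*
 * This runs in real mode out of the SLB miss handler, so it may only
 * touch storage that is accessible without translation (such as the
 * PACA). Rough flow: classify the EA (linear mapping / vmemmap /
 * vmalloc / IO / user), derive the context and segment flags for it,
 * then branch to slb_finish_load or slb_finish_load_1T to scramble
 * the VSID and write the entry with slbmte.
 */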
_GLOBAL(slb_allocate_realmode)
	/*
	 * check for bad kernel/user address
	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
	 */
	rldicr.	r9,r3,4,(63 - H_PGTABLE_EADDR_SIZE - 4)
	bne-	8f

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,SID_SHIFT	/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the proto-VSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.  */

	/* Check if hitting the linear mapping or some other kernel space
	 */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
.globl slb_miss_kernel_load_linear
slb_miss_kernel_load_linear:
	li	r11,0
	/*
	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
	 * r9 = region id.
	 */
	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
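	/*
	 * Worked example of the formula above: r9 holds the region id
	 * (ea >> 60), so the linear mapping (region 0xc) gets context
	 * MAX_USER_CONTEXT + 1, vmalloc/IO (region 0xd) gets
	 * MAX_USER_CONTEXT + 2, and vmemmap (region 0xf) gets
	 * MAX_USER_CONTEXT + 4. The same computation is repeated for
	 * the vmemmap/vmalloc/IO path below.
	 */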

BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T
3c726f8d 77
cec08e7a
BH
781:
79#ifdef CONFIG_SPARSEMEM_VMEMMAP
80 /* Check virtual memmap region. To be patches at kernel boot */
81 cmpldi cr0,r9,0xf
82 bne 1f
b86206e4
AB
83.globl slb_miss_kernel_load_vmemmap
84slb_miss_kernel_load_vmemmap:
cec08e7a
BH
85 li r11,0
86 b 6f
871:
88#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	/* vmalloc mapping gets the encoding from the PACA as the mapping
	 * can be demoted from 64K -> 4K dynamically on some machines
	 */
	clrldi	r11,r10,48
	cmpldi	r11,(H_VMALLOC_SIZE >> 28) - 1
	bgt	5f
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	6f
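	/*
	 * Sketch of the test above: r11 is the low 16 bits of the ESID,
	 * i.e. the 256MB segment offset within the kernel virtual
	 * region; segments that fit inside H_VMALLOC_SIZE belong to
	 * vmalloc (whose sllp is read from the PACA since it can be
	 * demoted to 4K), anything beyond it is handled as IO mapping.
	 */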
5:
	/* IO mapping */
.globl slb_miss_kernel_load_io
slb_miss_kernel_load_io:
	li	r11,0
6:
	/*
	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
	 * r9 = region id.
	 */
	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l

BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

0:	/*
	 * For userspace addresses, make sure this is region 0.
	 */
	cmpdi	r9, 0
	bne	8f

	/* when using slices, we extract the psize off the slice bitmaps
	 * and then we need to get the sllp encoding off the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array. We cannot
	 * really do dynamic patching unfortunately as processes might flip
	 * between 4k and 64k standard page size
	 */
#ifdef CONFIG_PPC_MM_SLICES
	/* r10 has the esid */
	cmpldi	r10,16
	/* below SLICE_LOW_TOP */
	blt	5f
	/*
	 * Handle hpsizes,
	 * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
	 */
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1) /* index */
	addi	r9,r11,PACAHIGHSLICEPSIZE
	lbzx	r9,r13,r9		/* r9 is hpsizes[r11] */
	/* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
	rldicl	r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
	b	6f

5:
	/*
	 * Handle lpsizes
	 * r9 is get_paca()->context.low_slices_psize, r11 is index
	 */
	ld	r9,PACALOWSLICESPSIZE(r13)
	mr	r11,r10
6:
	sldi	r11,r11,2		/* index * 4 */
	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE

	/* Now get to the array and obtain the sllp
	 */
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */
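	/*
	 * Rough C equivalent of the slice lookup above (an illustrative
	 * sketch only; names follow the comments in this file):
	 *
	 *	if (esid < 16) {	// below SLICE_LOW_TOP (4G)
	 *		psize = (context.low_slices_psize
	 *			 >> (esid * 4)) & 0xf;
	 *	} else {
	 *		index = esid >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1);
	 *		mask = (esid >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 1;
	 *		psize = (context.high_slices_psize[index]
	 *			 >> (mask * 4)) & 0xf;
	 *	}
	 *	sllp = mmu_psize_defs[psize].sllp | SLB_VSID_USER;
	 */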

	ld	r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
	cmpldi	r10,0x1000
	bge	slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
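	/*
	 * The 0x1000 above: with 256MB segments the ESID is ea >> 28,
	 * so a user address at or above 1T (2^40) has ESID >= 0x1000
	 * and takes the 1T-segment path when the CPU supports it.
	 */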
	b	slb_finish_load

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r9,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load

/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
	rldimi	r10,r9,ESID_BITS,0
	ASM_VSID_SCRAMBLE(r10,r9,256M)
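	/*
	 * The two steps above build and scramble the proto-VSID;
	 * roughly: proto_vsid = (context << ESID_BITS) | esid, then
	 * vsid = (proto_vsid * VSID_MULTIPLIER_256M) % VSID_MODULUS_256M,
	 * where the modulus is 2^VSID_BITS_256M - 1. The multiply can
	 * leave garbage in the bits above VSID_BITS_256M, which is why
	 * they are masked off when combining with the flags below.
	 */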
	/*
	 * bits above VSID_BITS_256M need to be ignored from r10
	 * also combine VSID and flags
	 */
	rldimi	r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))

	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */

7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* This gets soft patched on boot. */
.globl slb_compare_rr_to_size
slb_compare_rr_to_size:
	cmpldi	r10,0

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)

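	/*
	 * C sketch of the round-robin slot choice above (the cmpldi
	 * immediate is patched at boot with mmu_slb_size):
	 *
	 *	next = paca->stab_rr + 1;
	 *	if (next >= mmu_slb_size)
	 *		next = SLB_NUM_BOLTED;	// never evict bolted entries
	 *	paca->stab_rr = next;
	 */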
3:
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r10 = r3 | SLB_ESID_V */

	/* r3 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,2		/* r11 = offset * sizeof(u32) */
	srdi	r10,r10,28		/* get the 36 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u32 *)paca + offset */
	stw	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:					/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr
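	/*
	 * Note on the overflow case above: parking slb_cache_ptr at
	 * SLB_CACHE_ENTRIES + 1 tells the SLB flush code that the cache
	 * no longer covers every entry, so it has to invalidate the
	 * whole SLB rather than just the cached ESIDs.
	 */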

/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 *
 * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
 */
slb_finish_load_1T:
	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
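	/*
	 * SID_SHIFT is 28 (256MB segments) and SID_SHIFT_1T is 40
	 * (1TB segments), so the shift above by 12 converts the 256MB
	 * ESID we were given into the corresponding 1T ESID.
	 */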
	rldimi	r10,r9,ESID_BITS_1T,0
	ASM_VSID_SCRAMBLE(r10,r9,1T)
	/*
	 * bits above VSID_BITS_1T need to be ignored from r10
	 * also combine VSID and flags
	 */
	rldimi	r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */

	/* r3 = EA, r11 = VSID data */
	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b