/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  This file contains low-level assembler routines for managing
 *  the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 *  hash table, so this file is not used on them.)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_SMP
	.section .bss
	.align	2
	.globl mmu_hash_lock
mmu_hash_lock:
	.space	4
#endif /* CONFIG_SMP */
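/*
 * Note: while held, mmu_hash_lock contains a distinguishing value
 * stored by the taker (hash_page: 0x0fff0000; add_hash_page:
 * cpu | 0x000c0000; flush_hash_pages: cpu | 0x00090000; _tlbie:
 * cpu | 0x000b0000; _tlbia: cpu | 0x000a0000), which appears intended
 * to identify the owner when debugging a hang on this lock.
 */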

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG_THREAD contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r8, r10, ctr, lr.
 */
	.text
_GLOBAL(hash_page)
	tophys(r7,0)			/* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
	addis	r8,r7,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
#endif
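/*
 * The sequence above is a classic test-and-test-and-set spinlock:
 * spin on plain loads until the lock word looks free, then attempt
 * the lwarx/stwcx. reservation.  Roughly equivalent C (illustrative
 * sketch only; the kernel never takes this lock from C):
 *
 *	do {
 *		while (READ_ONCE(mmu_hash_lock) != 0)
 *			;				// spin without reserving
 *	} while (cmpxchg(&mmu_hash_lock, 0, 0x0fff0000) != 0);
 *	isync();					// acquire barrier
 */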
	/* Get PTE (linux-style) and check access */
	lis	r0,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r4,r0
	mfspr	r8,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	lwz	r5,PGDIR(r8)		/* virt page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:	add	r5,r5,r7		/* convert to phys addr */
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
#else
	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
	lwzx	r8,r8,r5		/* Get L1 entry */
	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
#endif
#ifdef CONFIG_SMP
	beq-	hash_page_out		/* return if no mapping */
#else
	/* XXX it seems like the 601 will give a machine fault on the
	   rfi if its alignment is wrong (bottom 4 bits of address are
	   8 or 0xc) and we have had a not-taken conditional branch
	   to the address following the rfi. */
	beqlr-
#endif
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
#else
	rlwimi	r8,r4,23,20,28		/* compute pte address */
#endif
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

	/*
	 * Update the linux PTE atomically.  We do the lwarx up-front
	 * because almost always, there won't be a permission violation
	 * and there won't already be an HPTE, and thus we will have
	 * to update the PTE to set _PAGE_HASHPTE. -- paulus.
	 *
	 * If PTE_64BIT is set, the low word is the flags word; use that
	 * word for locking since it contains all the interesting bits.
	 */
#if (PTE_FLAGS_OFFSET != 0)
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
retry:
	lwarx	r6,0,r8			/* get linux-style pte, flag word */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	hash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	retry			/* retry if someone got there first */
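/*
 * In C terms, the retry loop above performs (illustrative sketch only):
 *
 *	do {
 *		old = *ptep;				// lwarx
 *		if (access & ~old)			// permission denied?
 *			return;
 *		new = old | _PAGE_ACCESSED | _PAGE_HASHPTE
 *			  | (write ? _PAGE_DIRTY : 0);
 *	} while (!store_conditional(ptep, new));	// stwcx.
 */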

	mfsrin	r3,r4			/* get segment reg for segment */
	mfctr	r0
	stw	r0,_CTR(r11)
	bl	create_hpte		/* add the hash table entry */

#ifdef CONFIG_SMP
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)
#endif

	/* Return from the exception */
	lwz	r5,_CTR(r11)
	mtctr	r5
	lwz	r0,GPR0(r11)
	lwz	r7,GPR7(r11)
	lwz	r8,GPR8(r11)
	b	fast_exception_return

#ifdef CONFIG_SMP
hash_page_out:
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)
	blr
#endif /* CONFIG_SMP */

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
	mflr	r0
	stw	r0,4(r1)

	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */
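/*
 * i.e. the classic 32-bit hash-MMU context-to-VSID mapping (sketch;
 * create_hpte masks the result down to 24 bits):
 *
 *	vsid = (context * 897 * 16 + ((va >> 28) * 0x111)) & 0xffffff;
 */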

#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r8, r1)	/* use cpu number to make tag */
	lwz	r8,TI_CPU(r8)		/* to go in mmu_hash_lock */
	oris	r8,r8,12
#endif /* CONFIG_SMP */

	/*
	 * We disable interrupts here, even on UP, because we don't
	 * want to race with hash_page, and because we want the
	 * _PAGE_HASHPTE bit to be a reliable indication of whether
	 * the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT). -- paulus
	 */
	mfmsr	r9
	SYNC
	rlwinm	r0,r9,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync
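/*
 * The two rlwinm instructions simply build (sketch):
 *
 *	r0 = r9 & ~MSR_EE & ~MSR_DR;	// interrupts off, data translation off
 *
 * in IBM bit numbering: bit 16 is MSR_EE (0x8000), bit 27 is MSR_DR (0x10).
 */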

	tophys(r7,0)

#ifdef CONFIG_SMP
	addis	r6,r7,mmu_hash_lock@ha
	addi	r6,r6,mmu_hash_lock@l
10:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r6
	beq+	12f
11:	lwz	r0,0(r6)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
	 * If _PAGE_HASHPTE was already set, we don't replace the existing
	 * HPTE, so we just unlock and return.
	 */
	mr	r8,r5
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29
#else
	rlwimi	r8,r4,23,20,28
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
1:	lwarx	r6,0,r8
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	ori	r5,r6,_PAGE_HASHPTE
	stwcx.	r5,0,r8
	bne-	1b
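/*
 * i.e. an atomic test-and-set of _PAGE_HASHPTE (illustrative sketch):
 *
 *	do {
 *		old = *ptep;				// lwarx
 *		if (old & _PAGE_HASHPTE)
 *			goto unlock_and_return;		// HPTE already there
 *	} while (!store_conditional(ptep, old | _PAGE_HASHPTE));
 */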

	bl	create_hpte

9:
#ifdef CONFIG_SMP
	addis	r6,r7,mmu_hash_lock@ha
	addi	r6,r6,mmu_hash_lock@l
	eieio
	li	r0,0
	stw	r0,0(r6)		/* clear mmu_hash_lock */
#endif

	/* reenable interrupts and DR */
	mtmsr	r9
	SYNC_601
	isync

	lwz	r0,4(r1)
	mtlr	r0
	blr

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
 * offset to be added to addresses (0 if the MMU is on,
 * -KERNELBASE if it is off).  r10 contains the upper half of
 * the PTE if CONFIG_PTE_64BIT.
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 *  -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)

/* defines for the PTE format for 32-bit PPCs */
#define HPTE_SIZE	8
#define PTEG_SIZE	64
#define LG_PTEG_SIZE	6
#define LDPTEu		lwzu
#define LDPTE		lwz
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31

#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE
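/*
 * With these definitions, the PTEG addresses produced by the patched
 * instructions in hash_page_patch_A/B/C below amount to (sketch):
 *
 *	hash      = (vsid ^ (va >> 12)) & ((1 << Hash_bits) - 1);
 *	primary   = Hash_base + hash * PTEG_SIZE;
 *	secondary = primary ^ Hash_msk;		// complement hash in-place
 */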

_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r0,r5,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe04		/* clear out reserved bits */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
	rlwinm	r8,r8,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
#ifdef CONFIG_PTE_64BIT
	/* Put the XPN bits into the PTE */
	rlwimi	r8,r10,8,20,22
	rlwimi	r8,r10,2,29,29
#endif

	/* Construct the high word of the PPC-style PTE (r5) */
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r5)			/* set V (valid) bit */

	/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(hash_page_patch_A)
	addis	r0,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */

	/*
	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
	 * if it is clear, meaning that the HPTE isn't there already...
	 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4

	addis	r4,r7,htab_hash_searches@ha
	lwz	r6,htab_hash_searches@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6,htab_hash_searches@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-HPTE_SIZE
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_slot

	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_B)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	found_slot
	xori	r5,r5,PTE_H		/* clear H bit again */

	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-HPTE_SIZE	/* search primary PTEG */
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_empty

	/* update counter of times that the primary PTEG is full */
	addis	r4,r7,primary_pteg_full@ha
	lwz	r6,primary_pteg_full@l(r4)
	addi	r6,r6,1
	stw	r6,primary_pteg_full@l(r4)

	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_C)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	found_empty
	xori	r5,r5,PTE_H		/* clear H bit again */
	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 *
	 * In addition, we skip any slot that is mapping kernel text in
	 * order to avoid a deadlock when not using BAT mappings if
	 * trying to hash in the kernel hash code itself after it has
	 * already taken the hash table lock. This works in conjunction
	 * with pre-faulting of the kernel text.
	 *
	 * If the hash table bucket is full of kernel text entries, we'll
	 * lock up here, but that shouldn't happen.
	 */

1:	addis	r4,r7,next_slot@ha	/* get next evict slot */
	lwz	r6,next_slot@l(r4)
	addi	r6,r6,HPTE_SIZE		/* search for candidate */
	andi.	r6,r6,7*HPTE_SIZE
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6
	LDPTE	r0,HPTE_SIZE/2(r4)	/* get PTE second word */
	clrrwi	r0,r0,12
	lis	r6,etext@h
	ori	r6,r6,etext@l		/* get etext */
	tophys(r6,r6)
	cmpl	cr0,r0,r6		/* compare and try again */
	blt	1b
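/*
 * i.e. round-robin eviction within the primary PTEG, skipping any slot
 * whose RPN falls inside the kernel text (illustrative sketch):
 *
 *	do {
 *		next_slot = (next_slot + HPTE_SIZE) & (7 * HPTE_SIZE);
 *		slot = pteg_base + next_slot;
 *		rpn = slot->word1 & ~0xfff;	// RPN from HPTE 2nd word
 *	} while (rpn < etext_phys);		// don't evict kernel text
 */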

#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
found_empty:
	STPTE	r5,0(r4)
found_slot:
	STPTE	r8,HPTE_SIZE/2(r4)

#else /* CONFIG_SMP */
	/*
	 * Between the tlbie above and updating the hash table entry below,
	 * another CPU could read the hash table entry and put it in its TLB.
	 * There are 3 cases:
	 * 1. using an empty slot
	 * 2. updating an earlier entry to change permissions (i.e. enable write)
	 * 3. taking over the PTE for an unrelated address
	 *
	 * In each case it doesn't really matter if the other CPUs have the old
	 * PTE in their TLB.  So we don't need to bother with another tlbie here,
	 * which is convenient as we've overwritten the register that had the
	 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
	 * and gets the new PTE from the hash table.
	 *
	 * We do however have to make sure that the PTE is never in an invalid
	 * state with the V bit set.
	 */
found_empty:
found_slot:
	CLR_V(r5,r0)		/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)
	sync
	TLBSYNC
	STPTE	r8,HPTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)	/* finally set V bit in PTE */
#endif /* CONFIG_SMP */

	sync		/* make sure pte updates get to memory */
	blr
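/*
 * The SMP store sequence above follows the usual invalidate-update-
 * revalidate recipe for hash table entries, roughly (sketch):
 *
 *	hpte->word0 = new_word0 & ~PTE_V;	// entry invalid while we work
 *	sync(); tlbsync();			// old translations are gone
 *	hpte->word1 = new_word1;		// RPN, WIMG, PP
 *	sync();
 *	hpte->word0 = new_word0 | PTE_V;	// publish the valid entry
 */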

	.section .bss
	.align	2
next_slot:
	.space	4
primary_pteg_full:
	.space	4
htab_hash_searches:
	.space	4
	.previous

/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *		    int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
_GLOBAL(flush_hash_pages)
	tophys(r7,0)

	/*
	 * We disable interrupts here, even on UP, because we want
	 * the _PAGE_HASHPTE bit to be a reliable indication of
	 * whether the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT). -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	/* First find a PTE in the range that has _PAGE_HASHPTE set */
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,22,20,29
#else
	rlwimi	r5,r4,23,20,28
#endif
1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	2f
	ble	cr1,19f
	addi	r4,r4,0x1000
	addi	r5,r5,PTE_SIZE
	addi	r6,r6,-1
	b	1b

	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */

	/* Construct the high word of the PPC-style PTE (r11) */
	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r11)			/* set V (valid) bit */

#ifdef CONFIG_SMP
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
	CURRENT_THREAD_INFO(r8, r1)
	add	r8,r8,r7
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,9
10:	lwarx	r0,0,r9
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done (for this pte).  If not,
	 * clear it (atomically) and proceed.  -- paulus.
	 */
#if (PTE_FLAGS_OFFSET != 0)
	addi	r5,r5,PTE_FLAGS_OFFSET
#endif
33:	lwarx	r8,0,r5			/* fetch the pte flags word */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	bne-	33b

	/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(flush_hash_patch_A)
	addis	r8,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8		/* make primary hash */

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	li	r0,8			/* PTEs/group */
	mtctr	r0
	addi	r12,r8,-HPTE_SIZE
1:	LDPTEu	r0,HPTE_SIZE(r12)	/* get next PTE */
	CMPPTE	0,r0,r11
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	3f

	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
	li	r0,8			/* PTEs/group */
_GLOBAL(flush_hash_patch_B)
	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r0,HPTE_SIZE(r12)
	CMPPTE	0,r0,r11
	bdnzf	2,2b
	xori	r11,r11,PTE_H		/* clear H again */
	bne-	4f			/* should rarely fail to find it */

3:	li	r0,0
	STPTE	r0,0(r12)		/* invalidate entry */
4:	sync
	tlbie	r4			/* in hw tlb too */
	sync

8:	ble	cr1,9f			/* if all ptes checked */
81:	addi	r6,r6,-1
	addi	r5,r5,PTE_SIZE
	addi	r4,r4,0x1000
	lwz	r0,0(r5)		/* check next pte */
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	33b
	bgt	cr1,81b

9:
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

19:	mtmsr	r10
	SYNC_601
	isync
	blr
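/*
 * Overall control flow of flush_hash_pages, in C terms (sketch):
 *
 *	for (i = 0; i < count; i++, va += 0x1000, ptep++) {
 *		if (!(*ptep & _PAGE_HASHPTE))
 *			continue;
 *		// atomically clear _PAGE_HASHPTE in *ptep,
 *		// find the matching HPTE (primary then secondary PTEG),
 *		// zero its first word if found, then:
 *		sync(); tlbie(va); sync();
 *	}
 */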

/*
 * Flush an entry from the TLB
 */
_GLOBAL(_tlbie)
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r8, r1)
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,11
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
	blr

/*
 * Flush the entire TLB. 603/603e only
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_SMP)
	CURRENT_THREAD_INFO(r8, r1)
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,10
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
	blr