/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008	 Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009	 Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away, haha
 * they're coming to take me away, hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/cache.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/war.h>
#include <asm/uasm.h>
/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);
static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}
/*
 * Found by experiment: At least some revisions of the 4kc throw under
 * some circumstances a machine check exception, triggered by invalid
 * values in the index register.  Delaying the tlbp instruction until
 * after the next branch, plus adding an additional nop in front of
 * tlbwi/tlbwr avoids the invalid index register values.  Nobody knows
 * why; it's not an issue caused by the core RTL.
 */
static int __cpuinit m4kc_tlbp_war(void)
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}
/* Handle labels (which must be positive integers). */
enum label_id {
	label_second_part = 1,
	label_leave,
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard,
	label_split,
	label_tlbl_goaround1,
	label_tlbl_goaround2,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
	label_large_segbits_fault,
#ifdef CONFIG_HUGETLB_PAGE
	label_tlb_huge_update,
#endif
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
UASM_L_LA(_tlbl_goaround1)
UASM_L_LA(_tlbl_goaround2)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
UASM_L_LA(_large_segbits_fault)
#ifdef CONFIG_HUGETLB_PAGE
UASM_L_LA(_tlb_huge_update)
#endif
/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
	int i;

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

	pr_debug("\t.set pop\n");
}
/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_PAGEMASK	5, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif
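/*
 * A note on the macro above: on 64-bit kernels the walk keys off
 * c0_xcontext, whose BadVPN2 field covers the full 64-bit faulting
 * address, while 32-bit kernels use c0_context.  GET_CONTEXT() hides
 * that difference from the handler builders below.
 */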
/*
 * The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;

static int check_for_high_segbits __cpuinitdata;
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT

static unsigned int kscratch_used_mask __cpuinitdata;

static int __cpuinit allocate_kscratch(void)
{
	int r;
	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;

	r = ffs(a);

	if (r == 0)
		return -1;

	r--; /* make it zero based */

	kscratch_used_mask |= (1 << r);

	return r;
}

static int pgd_reg __cpuinitdata;
#else /* !CONFIG_MIPS_PGD_C0_CONTEXT */
/*
 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
 * we cannot do r3000 under these circumstances.
 *
 * Declare pgd_current here instead of including mmu_context.h to avoid type
 * conflicts for tlbmiss_handler_setup_pgd.
 */
extern unsigned long pgd_current[];
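/*
 * A sketch of the two-level walk that build_r3000_tlb_refill_handler()
 * below emits, assuming 4kB pages and one-word PTEs: the faulting
 * address is shifted right by 22 to index the 1024-entry pgd (scaled
 * to a byte offset by the following sll), then the pre-scaled PTE
 * index is taken from c0_context and masked with 0xffc to keep it
 * word aligned.
 */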
/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));

	memcpy((void *)ebase, tlb_handler, 0x80);

	dump_handler((u32 *)ebase, 32);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow in the
 * other one.  To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __cpuinitdata;
/*
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *	stalling_instruction
 *	TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB.  The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENTRYHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Erratum 2 will not be fixed.  This erratum is also present on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_type()) {
	/* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
		uasm_i_nop(p);
		uasm_i_tlbp(p);
		break;

	default:
		uasm_i_tlbp(p);
		break;
	}
}
/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };
static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
					    struct uasm_reloc **r,
					    enum tlb_write_entry wmode)
{
	void(*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = uasm_i_tlbwr; break;
	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
	}

	if (cpu_has_mips_r2) {
		if (cpu_has_mips_r2_exec_hazard)
			uasm_i_ehb(p);
		tlbw(p);
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		break;

	case CPU_NEVADA:
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		break;

	case CPU_RM9000:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		tlbw(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
	}
}
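/*
 * A note on the bgezl trick used above: "bgezl zero" is a branch-likely
 * that is always taken, so the tlbw placed in its delay slot always
 * executes, while the branch itself soaks up the mtc0 hazard slot.
 * Execution then continues at the label placed immediately after.
 */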
static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
								  unsigned int reg)
{
	if (kernel_uses_smartmips_rixi) {
		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
#ifdef CONFIG_64BIT_PHYS_ADDR
		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
#else
		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
#endif
	}
}
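/*
 * With SmartMIPS RIXI the SRL above discards the software bits below
 * _PAGE_NO_EXEC, and the ROTR then rotates the remaining field so that
 * the global bit lands at bit 0 while the no-exec/no-read bits wrap
 * around into the top of EntryLo, where the hardware RI/XI bits live.
 */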
#ifdef CONFIG_HUGETLB_PAGE

static __cpuinit void build_restore_pagemask(u32 **p,
					     struct uasm_reloc **r,
					     unsigned int tmp,
					     enum label_id lid)
{
	/* Reset default page size */
	if (PM_DEFAULT_MASK >> 16) {
		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
		uasm_il_b(p, r, lid);
		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
	} else if (PM_DEFAULT_MASK) {
		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
		uasm_il_b(p, r, lid);
		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
	} else {
		uasm_il_b(p, r, lid);
		uasm_i_mtc0(p, 0, C0_PAGEMASK);
	}
}
static __cpuinit void build_huge_tlb_write_entry(u32 **p,
						 struct uasm_label **l,
						 struct uasm_reloc **r,
						 unsigned int tmp,
						 enum tlb_write_entry wmode)
{
	/* Set huge page tlb entry size */
	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
	uasm_i_mtc0(p, tmp, C0_PAGEMASK);

	build_tlb_write_entry(p, l, r, wmode);

	build_restore_pagemask(p, r, tmp, label_leave);
}
/*
 * Check if Huge PTE is present, if so then jump to LABEL.
 */
static void __cpuinit
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
		  unsigned int pmd, int lid)
{
	UASM_i_LW(p, tmp, 0, pmd);
	uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
	uasm_il_bnez(p, r, tmp, lid);
}
static __cpuinit void build_huge_update_entries(u32 **p,
						unsigned int pte,
						unsigned int tmp)
{
	int small_sequence;

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;

	/* We can clobber tmp.  It isn't used after this. */
	if (!small_sequence)
		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

	build_convert_pte_to_entrylo(p, pte);
	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
	/* convert to entrylo1 */
	if (small_sequence)
		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
	else
		UASM_i_ADDU(p, pte, pte, tmp);

	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
}
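/*
 * Worked example for the HPAGE_SIZE >> 7 constant above, assuming the
 * usual pre-shifted PTE layout with the PFN at bit 6 of EntryLo and a
 * 12-bit page offset: the odd half of the huge page starts
 * HPAGE_SIZE/2 bytes in, i.e. (HPAGE_SIZE/2) >> 12 frames, which as
 * an EntryLo increment is ((HPAGE_SIZE/2) >> 12) << 6 == HPAGE_SIZE >> 7.
 */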
static __cpuinit void build_huge_handler_tail(u32 **p,
					      struct uasm_reloc **r,
					      struct uasm_label **l,
					      unsigned int pte,
					      unsigned int ptr)
{
#ifdef CONFIG_SMP
	UASM_i_SC(p, pte, 0, ptr);
	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
	UASM_i_SW(p, pte, 0, ptr);
#endif
	build_huge_update_entries(p, pte, ptr);
	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed);
}
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
	long pgdc = (long)pgd_current;
#endif
	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);

	if (check_for_high_segbits) {
		/*
		 * The kernel currently implicitly assumes that the
		 * MIPS SEGBITS parameter for the processor is
		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
		 * allocate virtual addresses outside the maximum
		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
		 * that doesn't prevent user code from accessing the
		 * higher xuseg addresses.  Here, we make sure that
		 * everything but the lower xuseg addresses goes down
		 * the module_alloc/vmalloc path.
		 */
		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_bnez(p, r, ptr, label_vmalloc);
	} else {
		uasm_il_bltz(p, r, tmp, label_vmalloc);
	}
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
	if (pgd_reg != -1) {
		/* pgd is in pgd_reg */
		UASM_i_MFC0(p, ptr, 31, pgd_reg);
	} else {
		/*
		 * &pgd << 11 stored in CONTEXT [23..63].
		 */
		UASM_i_MFC0(p, ptr, C0_CONTEXT);

		/* Clear lower 23 bits of context. */
		uasm_i_dins(p, ptr, 0, 0, 23);

		/* 1 0	1 0 1  << 6  xkphys cached */
		uasm_i_ori(p, ptr, ptr, 0x540);
		uasm_i_drotr(p, ptr, ptr, 11);
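		/*
		 * The ori/drotr pair is a two-instruction trick: the
		 * bits seeded at positions 6..10 rotate into the top
		 * five address bits, while the same rotate undoes the
		 * "<< 11" scaling, leaving a cached xkphys pointer to
		 * the pgd in PTR without a separate lui/load sequence.
		 */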
	}
#elif defined(CONFIG_SMP)
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	uasm_i_dsrl_safe(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
	uasm_i_dsrl_safe(p, ptr, ptr, 23);
# endif
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_daddu(p, ptr, ptr, tmp);
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif

	uasm_l_vmalloc_done(l, *p);

	/* get pgd offset in bytes */
	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
#ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT - 3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
#endif
}
enum vmalloc64_mode {not_refill, refill};
/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
			unsigned int bvaddr, unsigned int ptr,
			enum vmalloc64_mode mode)
{
	long swpd = (long)swapper_pg_dir;
	int single_insn_swpd;
	int did_vmalloc_branch = 0;

	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);

	uasm_l_vmalloc(l, *p);

	if (mode == refill && check_for_high_segbits) {
		if (single_insn_swpd) {
			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
			did_vmalloc_branch = 1;
			/* fall through */
		} else {
			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
		}
	}
	if (!did_vmalloc_branch) {
		if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
			uasm_il_b(p, r, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
		} else {
			UASM_i_LA_mostly(p, ptr, swpd);
			uasm_il_b(p, r, label_vmalloc_done);
			if (uasm_in_compat_space_p(swpd))
				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
			else
				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
		}
	}
	if (mode == refill && check_for_high_segbits) {
		uasm_l_large_segbits_fault(l, *p);
		/*
		 * We get here if we are an xsseg address, or if we are
		 * an xuseg address above the (PGDIR_SHIFT+PGDIR_BITS) boundary.
		 *
		 * Ignoring xsseg (assumed disabled, so it would generate
		 * address errors anyway), the only remaining possibility
		 * is the upper xuseg addresses.  On processors with
		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
		 * addresses would have taken an address error. We try
		 * to mimic that here by taking a load/istream page
		 * fault.
		 */
		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
		uasm_i_jr(p, ptr);
	}
}
#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
# else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
# endif
	uasm_i_addu(p, ptr, tmp, ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
#endif
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */
static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

	switch (current_cpu_type()) {
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
		UASM_i_SRL(p, ctx, ctx, shift);
	uasm_i_andi(p, ctx, ctx, mask);
}
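/*
 * For the common configuration (4kB pages, 4-byte PTEs) the arithmetic
 * above gives shift == 1 and a mask selecting the byte offset of an
 * even/odd PTE pair, so the context value ends up scaled to index
 * pte_t pairs rather than single entries.
 */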
static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
	switch (current_cpu_type()) {
	case CPU_NEVADA:
		UASM_i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		UASM_i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}
static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
					   unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		if (kernel_uses_smartmips_rixi) {
			UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
			UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		} else {
			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
			uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
		}
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	if (kernel_uses_smartmips_rixi) {
		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		if (r4k_250MHZhwbug())
			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
		if (r4k_250MHZhwbug())
			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
		if (r45k_bvahwbug())
			uasm_i_mfc0(p, tmp, C0_INDEX);
	}
	if (r4k_250MHZhwbug())
		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}
/*
 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
 * because EXL == 0.  If we wrap, we can also use the 32 instruction
 * slots before the XTLB refill exception handler which belong to the
 * unused TLB refill exception.
 */
#define MIPS64_REFILL_INSNS 32
static void __cpuinit build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *f;
	unsigned int final_len;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

	/*
	 * create the plain linear handler
	 */
	if (bcm1250_m3_war()) {
		unsigned int segbits = 44;

		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		uasm_i_dsrl_safe(&p, K1, K0, 62);
		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
		uasm_i_or(&p, K0, K0, K1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

#ifdef CONFIG_HUGETLB_PAGE
	build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
#endif

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	uasm_l_leave(&l, *p);
	uasm_i_eret(&p); /* return from trap */

#ifdef CONFIG_HUGETLB_PAGE
	uasm_l_tlb_huge_update(&l, p);
	UASM_i_LW(&p, K0, 0, K1);
	build_huge_update_entries(&p, K0, K1);
	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random);
#endif

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, refill);
#endif

	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
	/* Loongson2 ebase is different than r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
		&& uasm_insn_has_bdelay(relocs,
					tlb_handler + MIPS64_REFILL_INSNS - 3)))
		panic("TLB refill handler space exceeded");
#endif
	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	f = final_handler;
	/* Simplest case, just copy the handler. */
	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + MIPS64_REFILL_INSNS;
	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
		/* Just copy the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
#if defined(CONFIG_HUGETLB_PAGE)
		const enum label_id ls = label_tlb_huge_update;
#else
		const enum label_id ls = label_vmalloc;
#endif
		u32 *split;
		int ov = 0;
		int i;

		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
			;
		BUG_ON(i == ARRAY_SIZE(labels));
		split = labels[i].addr;

		/*
		 * See if we have overflown one way or the other.
		 */
		if (split > tlb_handler + MIPS64_REFILL_INSNS ||
		    split < p - MIPS64_REFILL_INSNS)
			ov = 1;

		if (ov) {
			/*
			 * Split two instructions before the end.  One
			 * for the branch and one for the instruction
			 * in the delay slot.
			 */
			split = tlb_handler + MIPS64_REFILL_INSNS - 2;

			/*
			 * If the branch would fall in a delay slot,
			 * we must back up an additional instruction
			 * so that it is no longer in a delay slot.
			 */
			if (uasm_insn_has_bdelay(relocs, split - 1))
				split--;
		}
		/* Copy first part of the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		if (ov) {
			/* Insert branch. */
			uasm_l_split(&l, final_handler);
			uasm_il_b(&f, &r, label_split);
			if (uasm_insn_has_bdelay(relocs, split))
				uasm_i_nop(&f);
			else {
				uasm_copy_handler(relocs, labels,
						  split, split + 1, f);
				uasm_move_labels(labels, f, f + 1, -1);
				f++;
				split++;
			}
		}

		/* Copy the rest of the handler. */
		uasm_copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
			    (p - split);
	}
#endif /* CONFIG_64BIT */

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 final_len);

	memcpy((void *)ebase, final_handler, 0x100);

	dump_handler((u32 *)ebase, 64);
}
/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;
static void __cpuinit build_r4000_setup_pgd(void)
{
	const int a0 = 4;
	const int a1 = 5;
	u32 *p = tlbmiss_handler_setup_pgd;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	pgd_reg = allocate_kscratch();

	if (pgd_reg == -1) {
		/* PGD << 11 in c0_Context */
		/*
		 * If it is a ckseg0 address, convert to a physical
		 * address.  Shifting right by 29 and adding 4 will
		 * result in zero for these addresses.
		 */
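		/*
		 * For instance: ckseg0 covers [0xffffffff80000000,
		 * 0xffffffffa0000000), so the arithmetic shift right
		 * by 29 yields -4 and the addiu of 4 gives exactly 0;
		 * any other address stays nonzero and takes the branch
		 * below.
		 */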
		UASM_i_SRA(&p, a1, a0, 29);
		UASM_i_ADDIU(&p, a1, a1, 4);
		uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
		uasm_i_nop(&p);
		uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
		uasm_l_tlbl_goaround1(&l, p);
		UASM_i_SLL(&p, a0, a0, 11);
		uasm_i_jr(&p, 31);
		UASM_i_MTC0(&p, a0, C0_CONTEXT);
	} else {
		/* PGD in c0_KScratch */
		uasm_i_jr(&p, 31);
		UASM_i_MTC0(&p, a0, 31, pgd_reg);
	}
	if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
		panic("tlbmiss_handler_setup_pgd space exceeded");
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
		 (unsigned int)(p - tlbmiss_handler_setup_pgd));

	dump_handler(tlbmiss_handler_setup_pgd,
		     ARRAY_SIZE(tlbmiss_handler_setup_pgd));
}
#endif
static void __cpuinit
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_lld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_ld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LW(p, pte, 0, ptr);
#endif
}
static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_scd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no uasm_i_nop needed */
		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no uasm_i_nop needed */
		uasm_i_lw(p, pte, 0, ptr);
	} else
		uasm_i_nop(p);
# else
	uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_sd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}
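/*
 * On SMP the ll/sc pair emitted above can fail if another CPU touched
 * the page table between the load and the store; the beqz back to
 * label_smp_pgtable_change simply restarts the PTE update in that case.
 */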
/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct uasm_reloc **r,
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
	if (kernel_uses_smartmips_rixi) {
		uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
		uasm_il_beqz(p, r, pte, lid);
	} else {
		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
		uasm_il_bnez(p, r, pte, lid);
	}
	iPTE_LW(p, pte, ptr);
}
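/*
 * The andi/xori pair above is the usual "are both bits set?" idiom:
 * masking down to the two flags and then xoring with them leaves zero
 * exactly when _PAGE_PRESENT and _PAGE_READ are both set, so the bnez
 * takes the nopage path whenever either is missing.
 */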
/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}
/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_reloc **r,
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_il_bnez(p, r, pte, lid);
	iPTE_LW(p, pte, ptr);
}
/*
 * Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}
/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_WRITE);
	uasm_il_beqz(p, r, pte, lid);
	iPTE_LW(p, pte, ptr);
}
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}
/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
 */
static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
			     struct uasm_reloc **r, unsigned int pte,
			     unsigned int tmp)
{
	uasm_i_mfc0(p, tmp, C0_INDEX);
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
	uasm_i_tlbwi(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
	uasm_l_r3000_write_probe_fail(l, *p);
	uasm_i_tlbwr(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}
static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
}
static void __cpuinit build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	uasm_i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
/*
 * R4000 style TLB load/store/modify handlers.
 */
static void __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int pte,
				   unsigned int ptr)
{
#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * For huge tlb entries, pmd doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
#endif

	UASM_i_MFC0(p, pte, C0_BADVADDR);
	UASM_i_LW(p, ptr, 0, ptr);
	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
	uasm_l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, pte, ptr); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
}
static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	uasm_l_leave(l, *p);
	uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
#endif
}
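/*
 * The ori/xori pair above rounds PTR down to the even entry of a PTE
 * pair (the ori forces the sizeof(pte_t) bit on, the xori clears it
 * again), so build_update_entries() can always load EntryLo0/EntryLo1
 * from two adjacent PTEs.
 */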
static void __cpuinit build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		unsigned int segbits = 44;

		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		uasm_i_dsrl_safe(&p, K1, K0, 62);
		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
		uasm_i_or(&p, K0, K0, K1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test..
		 */
		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);
		/* Examine entrylo 0 or 1 based on ptr. */
		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
		uasm_i_beqz(&p, K0, 8);
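		/*
		 * The "8" is a byte offset: the branch skips the
		 * second mfc0 below when the PTE was the even one of
		 * the pair, while the EntryLo0 read sits in the delay
		 * slot and thus always executes.
		 */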
		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot */
		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
		/*
		 * If the entryLo (now in K0) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		uasm_i_andi(&p, K0, K0, 2);
		uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);

		uasm_l_tlbl_goaround1(&l, p);
		/* Reload the PTE value */
		iPTE_LW(&p, K0, K1);
	}
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when build_r4000_tlbchange_handler_head
	 * spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	build_tlb_probe_entry(&p);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test..
		 */
		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);
		/* Examine entrylo 0 or 1 based on ptr. */
		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
		uasm_i_beqz(&p, K0, 8);

		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot */
		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
		/*
		 * If the entryLo (now in K0) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		uasm_i_andi(&p, K0, K0, 2);
		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
		/* Reload the PTE value */
		iPTE_LW(&p, K0, K1);

		/*
		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
		 * it is restored in build_huge_tlb_write_entry.
		 */
		build_restore_pagemask(&p, &r, K0, label_nopage_tlbl);

		uasm_l_tlbl_goaround2(&l, p);
	}
	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
void __cpuinit build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU, multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

#ifdef CONFIG_64BIT
	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
#endif

	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
#else
		panic("No R3000 TLB refill handler");
#endif
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		if (!run_once) {
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
			build_r4000_setup_pgd();
#endif
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
		build_r4000_tlb_refill_handler();
	}
}
void __cpuinit flush_tlb_handlers(void)
{
	local_flush_icache_range((unsigned long)handle_tlbl,
				 (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	local_flush_icache_range((unsigned long)handle_tlbs,
				 (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	local_flush_icache_range((unsigned long)handle_tlbm,
				 (unsigned long)handle_tlbm + sizeof(handle_tlbm));
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
				 (unsigned long)tlbmiss_handler_setup_pgd +
				 sizeof(tlbmiss_handler_setup_pgd));
#endif
}