Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Synthesize TLB refill handlers at runtime. | |
7 | * | |
e30ec452 | 8 | * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer |
95affdda | 9 | * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki |
41c594ab | 10 | * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) |
fd062c84 | 11 | * Copyright (C) 2008, 2009 Cavium Networks, Inc. |
113c62d9 | 12 | * Copyright (C) 2011 MIPS Technologies, Inc. |
41c594ab RB |
13 | * |
14 | * ... and the days got worse and worse and now you see | |
15 | * I've gone completely out of my mind. | |
16 | * | |
17 | * They're coming to take me away haha | |
18 | * they're coming to take me away hoho hihi haha | |
19 | * to the funny farm where code is beautiful all the time ... | |
20 | * | |
21 | * (Condolences to Napoleon XIV) | |
1da177e4 LT |
22 | */ |
23 | ||
95affdda | 24 | #include <linux/bug.h> |
1da177e4 LT |
25 | #include <linux/kernel.h> |
26 | #include <linux/types.h> | |
631330f5 | 27 | #include <linux/smp.h> |
1da177e4 LT |
28 | #include <linux/string.h> |
29 | #include <linux/init.h> | |
3d8bfdd0 | 30 | #include <linux/cache.h> |
1da177e4 | 31 | |
3d8bfdd0 DD |
32 | #include <asm/cacheflush.h> |
33 | #include <asm/pgtable.h> | |
1da177e4 | 34 | #include <asm/war.h> |
3482d713 | 35 | #include <asm/uasm.h> |
b81947c6 | 36 | #include <asm/setup.h> |
e30ec452 | 37 | |
1ec56329 DD |
38 | /* |
39 | * TLB load/store/modify handlers. | |
40 | * | |
41 | * Only the fastpath gets synthesized at runtime, the slowpath for | |
42 | * do_page_fault remains normal asm. | |
43 | */ | |
44 | extern void tlb_do_page_fault_0(void); | |
45 | extern void tlb_do_page_fault_1(void); | |
46 | ||
bf28607f DD |
47 | struct work_registers { |
48 | int r1; | |
49 | int r2; | |
50 | int r3; | |
51 | }; | |
52 | ||
53 | struct tlb_reg_save { | |
54 | unsigned long a; | |
55 | unsigned long b; | |
56 | } ____cacheline_aligned_in_smp; | |
57 | ||
58 | static struct tlb_reg_save handler_reg_save[NR_CPUS]; | |
1ec56329 | 59 | |
aeffdbba | 60 | static inline int r45k_bvahwbug(void) |
1da177e4 LT |
61 | { |
62 | /* XXX: We should probe for the presence of this bug, but we don't. */ | |
63 | return 0; | |
64 | } | |
65 | ||
aeffdbba | 66 | static inline int r4k_250MHZhwbug(void) |
1da177e4 LT |
67 | { |
68 | /* XXX: We should probe for the presence of this bug, but we don't. */ | |
69 | return 0; | |
70 | } | |
71 | ||
aeffdbba | 72 | static inline int __maybe_unused bcm1250_m3_war(void) |
1da177e4 LT |
73 | { |
74 | return BCM1250_M3_WAR; | |
75 | } | |
76 | ||
aeffdbba | 77 | static inline int __maybe_unused r10000_llsc_war(void) |
1da177e4 LT |
78 | { |
79 | return R10000_LLSC_WAR; | |
80 | } | |
81 | ||
cc33ae43 DD |
82 | static int use_bbit_insns(void) |
83 | { | |
84 | switch (current_cpu_type()) { | |
85 | case CPU_CAVIUM_OCTEON: | |
86 | case CPU_CAVIUM_OCTEON_PLUS: | |
87 | case CPU_CAVIUM_OCTEON2: | |
88 | return 1; | |
89 | default: | |
90 | return 0; | |
91 | } | |
92 | } | |
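/*
 * Octeon's bbit0/bbit1 instructions branch directly on a single bit of
 * a general purpose register, so a PTE flag can be tested without a
 * separate andi into a temporary, saving an instruction in the handler
 * fast path.
 */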
93 | ||
2c8c53e2 DD |
94 | static int use_lwx_insns(void) |
95 | { | |
96 | switch (current_cpu_type()) { | |
97 | case CPU_CAVIUM_OCTEON2: | |
98 | return 1; | |
99 | default: | |
100 | return 0; | |
101 | } | |
102 | } | |
103 | #if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \ | |
104 | CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 | |
105 | static bool scratchpad_available(void) | |
106 | { | |
107 | return true; | |
108 | } | |
109 | static int scratchpad_offset(int i) | |
110 | { | |
111 | /* | |
112 | * CVMSEG starts at address -32768 and extends for | |
113 | * CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 128-byte cache lines. | |
114 | */ | |
115 | i += 1; /* Kernel use starts at the top and works down. */ | |
116 | return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768; | |
117 | } | |
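/*
 * Illustrative arithmetic, assuming CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
 * is 2 (an example value, not a recommendation): the region then spans
 * 256 bytes upward from -32768, and scratchpad_offset(0) returns
 * 2 * 128 - 8 * 1 - 32768 = -32520, the topmost 8-byte slot; each
 * higher index moves one slot further down.
 */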
118 | #else | |
119 | static bool scratchpad_available(void) | |
120 | { | |
121 | return false; | |
122 | } | |
123 | static int scratchpad_offset(int i) | |
124 | { | |
125 | BUG(); | |
e1c87d2a DD |
126 | /* Really unreachable, but evidently some GCC versions want this. */
127 | return 0; | |
2c8c53e2 DD |
128 | } |
129 | #endif | |
8df5beac MR |
130 | /* |
131 | * Found by experiment: at least some revisions of the 4Kc throw a | |
132 | * machine check exception under some circumstances, triggered by invalid | |
133 | * values in the index register. Delaying the tlbp instruction until | |
134 | * after the next branch, plus adding an additional nop in front of | |
135 | * tlbwi/tlbwr, avoids the invalid index register values. Nobody knows | |
136 | * why; it's not an issue caused by the core RTL. | |
137 | * | |
138 | */ | |
234fcd14 | 139 | static int __cpuinit m4kc_tlbp_war(void) |
8df5beac MR |
140 | { |
141 | return (current_cpu_data.processor_id & 0xffff00) == | |
142 | (PRID_COMP_MIPS | PRID_IMP_4KC); | |
143 | } | |
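/*
 * The revision byte of the PRId is masked off above, so every 4Kc
 * revision gets the workaround, matching the "at least some revisions"
 * observation.
 */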
144 | ||
e30ec452 | 145 | /* Handle labels (which must be positive integers). */ |
1da177e4 | 146 | enum label_id { |
e30ec452 | 147 | label_second_part = 1, |
1da177e4 LT |
148 | label_leave, |
149 | label_vmalloc, | |
150 | label_vmalloc_done, | |
02a54177 RB |
151 | label_tlbw_hazard_0, |
152 | label_split = label_tlbw_hazard_0 + 8, | |
6dd9344c DD |
153 | label_tlbl_goaround1, |
154 | label_tlbl_goaround2, | |
1da177e4 LT |
155 | label_nopage_tlbl, |
156 | label_nopage_tlbs, | |
157 | label_nopage_tlbm, | |
158 | label_smp_pgtable_change, | |
159 | label_r3000_write_probe_fail, | |
1ec56329 | 160 | label_large_segbits_fault, |
aa1762f4 | 161 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT |
fd062c84 DD |
162 | label_tlb_huge_update, |
163 | #endif | |
1da177e4 LT |
164 | }; |
165 | ||
e30ec452 TS |
166 | UASM_L_LA(_second_part) |
167 | UASM_L_LA(_leave) | |
e30ec452 TS |
168 | UASM_L_LA(_vmalloc) |
169 | UASM_L_LA(_vmalloc_done) | |
02a54177 | 170 | /* _tlbw_hazard_x is handled differently. */ |
e30ec452 | 171 | UASM_L_LA(_split) |
6dd9344c DD |
172 | UASM_L_LA(_tlbl_goaround1) |
173 | UASM_L_LA(_tlbl_goaround2) | |
e30ec452 TS |
174 | UASM_L_LA(_nopage_tlbl) |
175 | UASM_L_LA(_nopage_tlbs) | |
176 | UASM_L_LA(_nopage_tlbm) | |
177 | UASM_L_LA(_smp_pgtable_change) | |
178 | UASM_L_LA(_r3000_write_probe_fail) | |
1ec56329 | 179 | UASM_L_LA(_large_segbits_fault) |
aa1762f4 | 180 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT |
fd062c84 DD |
181 | UASM_L_LA(_tlb_huge_update) |
182 | #endif | |
656be92f | 183 | |
02a54177 RB |
184 | static int __cpuinitdata hazard_instance; |
185 | ||
186 | static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance) | |
187 | { | |
188 | switch (instance) { | |
189 | case 0 ... 7: | |
190 | uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance); | |
191 | return; | |
192 | default: | |
193 | BUG(); | |
194 | } | |
195 | } | |
196 | ||
197 | static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance) | |
198 | { | |
199 | switch (instance) { | |
200 | case 0 ... 7: | |
201 | uasm_build_label(l, *p, label_tlbw_hazard_0 + instance); | |
202 | break; | |
203 | default: | |
204 | BUG(); | |
205 | } | |
206 | } | |
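/*
 * Sketch of the sequence the two helpers above emit around a TLB write
 * (illustrative assembly, see the R4000 case in build_tlb_write_entry
 * below):
 *
 *	bgezl	$0, 1f		# always taken, since $0 >= 0
 *	 tlbwr			# executes in the branch-likely delay slot
 * 1:	nop
 *
 * The branch-likely soaks up the mtc0 hazard and the TLB write lands
 * in its delay slot, saving two nops after the tlbw.
 */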
207 | ||
92b1e6a6 | 208 | /* |
a2c763e0 RB |
209 | * pgtable bits are assigned dynamically depending on processor feature |
210 | * and statically based on kernel configuration. This spits out the actual | |
211 | * values the kernel is using. Required to make sense from disassembled | |
212 | * TLB exception handlers. | |
92b1e6a6 | 213 | */ |
a2c763e0 RB |
214 | static void output_pgtable_bits_defines(void) |
215 | { | |
216 | #define pr_define(fmt, ...) \ | |
217 | pr_debug("#define " fmt, ##__VA_ARGS__) | |
218 | ||
219 | pr_debug("#include <asm/asm.h>\n"); | |
220 | pr_debug("#include <asm/regdef.h>\n"); | |
221 | pr_debug("\n"); | |
222 | ||
223 | pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT); | |
224 | pr_define("_PAGE_READ_SHIFT %d\n", _PAGE_READ_SHIFT); | |
225 | pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT); | |
226 | pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT); | |
227 | pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT); | |
228 | #ifdef _PAGE_HUGE_SHIFT | |
229 | pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT); | |
230 | #endif | |
231 | if (cpu_has_rixi) { | |
232 | #ifdef _PAGE_NO_EXEC_SHIFT | |
233 | pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT); | |
234 | #endif | |
235 | #ifdef _PAGE_NO_READ_SHIFT | |
236 | pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT); | |
237 | #endif | |
238 | } | |
239 | pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT); | |
240 | pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT); | |
241 | pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT); | |
242 | pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT); | |
243 | pr_debug("\n"); | |
244 | } | |
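/*
 * Example of the resulting debug output (the shift values shown are
 * illustrative; the real numbers depend on the CPU features and the
 * kernel configuration):
 *
 *	#include <asm/asm.h>
 *	#include <asm/regdef.h>
 *
 *	#define _PAGE_PRESENT_SHIFT 0
 *	#define _PAGE_READ_SHIFT 1
 *	...
 *	#define _PFN_SHIFT 11
 */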
245 | ||
246 | static inline void dump_handler(const char *symbol, const u32 *handler, int count) | |
92b1e6a6 FBH |
247 | { |
248 | int i; | |
249 | ||
a2c763e0 RB |
250 | pr_debug("LEAF(%s)\n", symbol); |
251 | ||
92b1e6a6 FBH |
252 | pr_debug("\t.set push\n"); |
253 | pr_debug("\t.set noreorder\n"); | |
254 | ||
255 | for (i = 0; i < count; i++) | |
a2c763e0 | 256 | pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]); |
92b1e6a6 | 257 | |
a2c763e0 RB |
258 | pr_debug("\t.set\tpop\n"); |
259 | ||
260 | pr_debug("\tEND(%s)\n", symbol); | |
92b1e6a6 FBH |
261 | } |
262 | ||
1da177e4 LT |
263 | /* The only general purpose registers allowed in TLB handlers. */ |
264 | #define K0 26 | |
265 | #define K1 27 | |
266 | ||
267 | /* Some CP0 registers */ | |
41c594ab RB |
268 | #define C0_INDEX 0, 0 |
269 | #define C0_ENTRYLO0 2, 0 | |
270 | #define C0_TCBIND 2, 2 | |
271 | #define C0_ENTRYLO1 3, 0 | |
272 | #define C0_CONTEXT 4, 0 | |
fd062c84 | 273 | #define C0_PAGEMASK 5, 0 |
41c594ab RB |
274 | #define C0_BADVADDR 8, 0 |
275 | #define C0_ENTRYHI 10, 0 | |
276 | #define C0_EPC 14, 0 | |
277 | #define C0_XCONTEXT 20, 0 | |
1da177e4 | 278 | |
875d43e7 | 279 | #ifdef CONFIG_64BIT |
e30ec452 | 280 | # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT) |
1da177e4 | 281 | #else |
e30ec452 | 282 | # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT) |
1da177e4 LT |
283 | #endif |
284 | ||
285 | /* The worst case length of the handler is around 18 instructions for | |
286 | * R3000-style TLBs and up to 63 instructions for R4000-style TLBs. | |
287 | * Maximum space available is 32 instructions for R3000 and 64 | |
288 | * instructions for R4000. | |
289 | * | |
290 | * We deliberately chose a buffer size of 128, so we won't scribble | |
291 | * over anything important on overflow before we panic. | |
292 | */ | |
234fcd14 | 293 | static u32 tlb_handler[128] __cpuinitdata; |
1da177e4 LT |
294 | |
295 | /* simply assume worst case size for labels and relocs */ | |
234fcd14 RB |
296 | static struct uasm_label labels[128] __cpuinitdata; |
297 | static struct uasm_reloc relocs[128] __cpuinitdata; | |
1da177e4 | 298 | |
2c8c53e2 | 303 | static int check_for_high_segbits __cpuinitdata; |
3d8bfdd0 DD |
304 | |
305 | static unsigned int kscratch_used_mask __cpuinitdata; | |
306 | ||
307 | static int __cpuinit allocate_kscratch(void) | |
308 | { | |
309 | int r; | |
310 | unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask; | |
311 | ||
312 | r = ffs(a); | |
313 | ||
314 | if (r == 0) | |
315 | return -1; | |
316 | ||
317 | r--; /* make it zero based */ | |
318 | ||
319 | kscratch_used_mask |= (1 << r); | |
320 | ||
321 | return r; | |
322 | } | |
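/*
 * Sketch of the allocator's behaviour, assuming
 * cpu_data[0].kscratch_mask == 0x0c (KScratch selects 2 and 3
 * implemented; an example value): the first call returns 2 and records
 * it in kscratch_used_mask, the second call returns 3, and a third
 * call returns -1.
 */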
323 | ||
2c8c53e2 | 324 | static int scratch_reg __cpuinitdata; |
3d8bfdd0 | 325 | static int pgd_reg __cpuinitdata; |
2c8c53e2 DD |
326 | enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch}; |
327 | ||
bf28607f DD |
328 | static struct work_registers __cpuinit build_get_work_registers(u32 **p) |
329 | { | |
330 | struct work_registers r; | |
331 | ||
332 | int smp_processor_id_reg; | |
333 | int smp_processor_id_sel; | |
334 | int smp_processor_id_shift; | |
335 | ||
336 | if (scratch_reg > 0) { | |
337 | /* Save in CPU local C0_KScratch? */ | |
338 | UASM_i_MTC0(p, 1, 31, scratch_reg); | |
339 | r.r1 = K0; | |
340 | r.r2 = K1; | |
341 | r.r3 = 1; | |
342 | return r; | |
343 | } | |
344 | ||
345 | if (num_possible_cpus() > 1) { | |
346 | #ifdef CONFIG_MIPS_PGD_C0_CONTEXT | |
347 | smp_processor_id_shift = 51; | |
348 | smp_processor_id_reg = 20; /* XContext */ | |
349 | smp_processor_id_sel = 0; | |
350 | #else | |
351 | # ifdef CONFIG_32BIT | |
352 | smp_processor_id_shift = 25; | |
353 | smp_processor_id_reg = 4; /* Context */ | |
354 | smp_processor_id_sel = 0; | |
355 | # endif | |
356 | # ifdef CONFIG_64BIT | |
357 | smp_processor_id_shift = 26; | |
358 | smp_processor_id_reg = 4; /* Context */ | |
359 | smp_processor_id_sel = 0; | |
360 | # endif | |
361 | #endif | |
362 | /* Get smp_processor_id */ | |
363 | UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel); | |
364 | UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift); | |
365 | ||
366 | /* handler_reg_save index in K0 */ | |
367 | UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save))); | |
368 | ||
369 | UASM_i_LA(p, K1, (long)&handler_reg_save); | |
370 | UASM_i_ADDU(p, K0, K0, K1); | |
371 | } else { | |
372 | UASM_i_LA(p, K0, (long)&handler_reg_save); | |
373 | } | |
374 | /* K0 now points to save area, save $1 and $2 */ | |
375 | UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0); | |
376 | UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0); | |
377 | ||
378 | r.r1 = K1; | |
379 | r.r2 = 1; | |
380 | r.r3 = 2; | |
381 | return r; | |
382 | } | |
383 | ||
384 | static void __cpuinit build_restore_work_registers(u32 **p) | |
385 | { | |
386 | if (scratch_reg > 0) { | |
387 | UASM_i_MFC0(p, 1, 31, scratch_reg); | |
388 | return; | |
389 | } | |
390 | /* K0 already points to save area, restore $1 and $2 */ | |
391 | UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0); | |
392 | UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0); | |
393 | } | |
394 | ||
2c8c53e2 | 395 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
3d8bfdd0 | 396 | |
82622284 DD |
397 | /* |
398 | * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current, | |
399 | * so we cannot do the R3000 handler under these circumstances. |
3d8bfdd0 DD |
400 | * |
401 | * Declare pgd_current here instead of including mmu_context.h to avoid type | |
402 | * conflicts for tlbmiss_handler_setup_pgd | |
82622284 | 403 | */ |
3d8bfdd0 | 404 | extern unsigned long pgd_current[]; |
82622284 | 405 | |
1da177e4 LT |
406 | /* |
407 | * The R3000 TLB handler is simple. | |
408 | */ | |
234fcd14 | 409 | static void __cpuinit build_r3000_tlb_refill_handler(void) |
1da177e4 LT |
410 | { |
411 | long pgdc = (long)pgd_current; | |
412 | u32 *p; | |
413 | ||
414 | memset(tlb_handler, 0, sizeof(tlb_handler)); | |
415 | p = tlb_handler; | |
416 | ||
e30ec452 TS |
417 | uasm_i_mfc0(&p, K0, C0_BADVADDR); |
418 | uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */ | |
419 | uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1); | |
420 | uasm_i_srl(&p, K0, K0, 22); /* load delay */ | |
421 | uasm_i_sll(&p, K0, K0, 2); | |
422 | uasm_i_addu(&p, K1, K1, K0); | |
423 | uasm_i_mfc0(&p, K0, C0_CONTEXT); | |
424 | uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */ | |
425 | uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */ | |
426 | uasm_i_addu(&p, K1, K1, K0); | |
427 | uasm_i_lw(&p, K0, 0, K1); | |
428 | uasm_i_nop(&p); /* load delay */ | |
429 | uasm_i_mtc0(&p, K0, C0_ENTRYLO0); | |
430 | uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */ | |
431 | uasm_i_tlbwr(&p); /* cp0 delay */ | |
432 | uasm_i_jr(&p, K1); | |
433 | uasm_i_rfe(&p); /* branch delay */ | |
1da177e4 LT |
434 | |
435 | if (p > tlb_handler + 32) | |
436 | panic("TLB refill handler space exceeded"); | |
437 | ||
e30ec452 TS |
438 | pr_debug("Wrote TLB refill handler (%u instructions).\n", |
439 | (unsigned int)(p - tlb_handler)); | |
1da177e4 | 440 | |
91b05e67 | 441 | memcpy((void *)ebase, tlb_handler, 0x80); |
92b1e6a6 | 442 | |
a2c763e0 | 443 | dump_handler("r3000_tlb_refill", (u32 *)ebase, 32); |
1da177e4 | 444 | } |
82622284 | 445 | #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ |
1da177e4 LT |
446 | |
447 | /* | |
448 | * The R4000 TLB handler is much more complicated. We have two | |
449 | * consecutive handler areas with space for 32 instructions each. | |
450 | * Since they aren't used at the same time, one can overflow into | |
451 | * the other. To keep things simple, we first assume linear space, | |
452 | * then we relocate it to the final handler layout as needed. | |
453 | */ | |
234fcd14 | 454 | static u32 final_handler[64] __cpuinitdata; |
1da177e4 LT |
455 | |
456 | /* | |
457 | * Hazards | |
458 | * | |
459 | * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0: | |
460 | * 2. A timing hazard exists for the TLBP instruction. | |
461 | * | |
462 | * stalling_instruction | |
463 | * TLBP | |
464 | * | |
465 | * The JTLB is being read for the TLBP throughout the stall generated by the | |
466 | * previous instruction. This is not really correct as the stalling instruction | |
467 | * can modify the address used to access the JTLB. The failure symptom is that | |
468 | * the TLBP instruction will use an address created for the stalling instruction | |
469 | * and not the address held in C0_EntryHi and thus report the wrong results. | |
470 | * | |
471 | * The software work-around is to not allow the instruction preceding the TLBP | |
472 | * to stall - make it an NOP or some other instruction guaranteed not to stall. | |
473 | * | |
474 | * Errata 2 will not be fixed. This errata is also on the R5000. | |
475 | * | |
476 | * As if we MIPS hackers wouldn't know how to nop pipelines happy ... | |
477 | */ | |
234fcd14 | 478 | static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p) |
1da177e4 | 479 | { |
10cc3529 | 480 | switch (current_cpu_type()) { |
326e2e1a | 481 | /* Found by experiment: R4600 v2.0/R4700 needs this, too. */ |
f5b4d956 | 482 | case CPU_R4600: |
326e2e1a | 483 | case CPU_R4700: |
1da177e4 | 484 | case CPU_R5000: |
1da177e4 | 485 | case CPU_NEVADA: |
e30ec452 TS |
486 | uasm_i_nop(p); |
487 | uasm_i_tlbp(p); | |
1da177e4 LT |
488 | break; |
489 | ||
490 | default: | |
e30ec452 | 491 | uasm_i_tlbp(p); |
1da177e4 LT |
492 | break; |
493 | } | |
494 | } | |
495 | ||
496 | /* | |
497 | * Write a random or indexed TLB entry, taking care of the hazards from | |
25985edc | 498 | * the preceding mtc0 and the following eret. |
1da177e4 LT |
499 | */ |
500 | enum tlb_write_entry { tlb_random, tlb_indexed }; | |
501 | ||
234fcd14 | 502 | static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, |
e30ec452 | 503 | struct uasm_reloc **r, |
1da177e4 LT |
504 | enum tlb_write_entry wmode) |
505 | { | |
506 | void(*tlbw)(u32 **) = NULL; | |
507 | ||
508 | switch (wmode) { | |
e30ec452 TS |
509 | case tlb_random: tlbw = uasm_i_tlbwr; break; |
510 | case tlb_indexed: tlbw = uasm_i_tlbwi; break; | |
1da177e4 LT |
511 | } |
512 | ||
161548bf | 513 | if (cpu_has_mips_r2) { |
625c0a21 SH |
514 | /* |
515 | * The architecture spec says an ehb is required here, | |
516 | * but a number of cores do not have the hazard and | |
517 | * using an ehb causes an expensive pipeline stall. | |
518 | */ | |
519 | switch (current_cpu_type()) { | |
520 | case CPU_M14KC: | |
521 | case CPU_74K: | |
522 | break; | |
523 | ||
524 | default: | |
41f0e4d0 | 525 | uasm_i_ehb(p); |
625c0a21 SH |
526 | break; |
527 | } | |
161548bf RB |
528 | tlbw(p); |
529 | return; | |
530 | } | |
531 | ||
10cc3529 | 532 | switch (current_cpu_type()) { |
1da177e4 LT |
533 | case CPU_R4000PC: |
534 | case CPU_R4000SC: | |
535 | case CPU_R4000MC: | |
536 | case CPU_R4400PC: | |
537 | case CPU_R4400SC: | |
538 | case CPU_R4400MC: | |
539 | /* | |
540 | * This branch uses up a mtc0 hazard nop slot and saves | |
541 | * two nops after the tlbw instruction. | |
542 | */ | |
02a54177 | 543 | uasm_bgezl_hazard(p, r, hazard_instance); |
1da177e4 | 544 | tlbw(p); |
02a54177 RB |
545 | uasm_bgezl_label(l, p, hazard_instance); |
546 | hazard_instance++; | |
e30ec452 | 547 | uasm_i_nop(p); |
1da177e4 LT |
548 | break; |
549 | ||
550 | case CPU_R4600: | |
551 | case CPU_R4700: | |
e30ec452 | 552 | uasm_i_nop(p); |
2c93e12c | 553 | tlbw(p); |
e30ec452 | 554 | uasm_i_nop(p); |
2c93e12c MR |
555 | break; |
556 | ||
359187d6 | 557 | case CPU_R5000: |
359187d6 RB |
558 | case CPU_NEVADA: |
559 | uasm_i_nop(p); /* QED specifies 2 nops hazard */ | |
560 | uasm_i_nop(p); /* QED specifies 2 nops hazard */ | |
561 | tlbw(p); | |
562 | break; | |
563 | ||
2c93e12c | 564 | case CPU_R4300: |
1da177e4 LT |
565 | case CPU_5KC: |
566 | case CPU_TX49XX: | |
bdf21b18 | 567 | case CPU_PR4450: |
efa0f81c | 568 | case CPU_XLR: |
e30ec452 | 569 | uasm_i_nop(p); |
1da177e4 LT |
570 | tlbw(p); |
571 | break; | |
572 | ||
573 | case CPU_R10000: | |
574 | case CPU_R12000: | |
44d921b2 | 575 | case CPU_R14000: |
1da177e4 | 576 | case CPU_4KC: |
b1ec4c8e | 577 | case CPU_4KEC: |
113c62d9 | 578 | case CPU_M14KC: |
1da177e4 | 579 | case CPU_SB1: |
93ce2f52 | 580 | case CPU_SB1A: |
1da177e4 LT |
581 | case CPU_4KSC: |
582 | case CPU_20KC: | |
583 | case CPU_25KF: | |
602977b0 KC |
584 | case CPU_BMIPS32: |
585 | case CPU_BMIPS3300: | |
586 | case CPU_BMIPS4350: | |
587 | case CPU_BMIPS4380: | |
588 | case CPU_BMIPS5000: | |
2a21c730 | 589 | case CPU_LOONGSON2: |
a644b277 | 590 | case CPU_R5500: |
8df5beac | 591 | if (m4kc_tlbp_war()) |
e30ec452 | 592 | uasm_i_nop(p); |
2f794d09 | 593 | case CPU_ALCHEMY: |
1da177e4 LT |
594 | tlbw(p); |
595 | break; | |
596 | ||
1da177e4 | 597 | case CPU_RM7000: |
e30ec452 TS |
598 | uasm_i_nop(p); |
599 | uasm_i_nop(p); | |
600 | uasm_i_nop(p); | |
601 | uasm_i_nop(p); | |
1da177e4 LT |
602 | tlbw(p); |
603 | break; | |
604 | ||
1da177e4 LT |
605 | case CPU_RM9000: |
606 | /* | |
607 | * When the JTLB is updated by tlbwi or tlbwr, a subsequent | |
608 | * use of the JTLB for instructions should not occur for 4 | |
609 | * cpu cycles and use for data translations should not occur | |
610 | * for 3 cpu cycles. | |
611 | */ | |
e30ec452 TS |
612 | uasm_i_ssnop(p); |
613 | uasm_i_ssnop(p); | |
614 | uasm_i_ssnop(p); | |
615 | uasm_i_ssnop(p); | |
1da177e4 | 616 | tlbw(p); |
e30ec452 TS |
617 | uasm_i_ssnop(p); |
618 | uasm_i_ssnop(p); | |
619 | uasm_i_ssnop(p); | |
620 | uasm_i_ssnop(p); | |
1da177e4 LT |
621 | break; |
622 | ||
623 | case CPU_VR4111: | |
624 | case CPU_VR4121: | |
625 | case CPU_VR4122: | |
626 | case CPU_VR4181: | |
627 | case CPU_VR4181A: | |
e30ec452 TS |
628 | uasm_i_nop(p); |
629 | uasm_i_nop(p); | |
1da177e4 | 630 | tlbw(p); |
e30ec452 TS |
631 | uasm_i_nop(p); |
632 | uasm_i_nop(p); | |
1da177e4 LT |
633 | break; |
634 | ||
635 | case CPU_VR4131: | |
636 | case CPU_VR4133: | |
7623debf | 637 | case CPU_R5432: |
e30ec452 TS |
638 | uasm_i_nop(p); |
639 | uasm_i_nop(p); | |
1da177e4 LT |
640 | tlbw(p); |
641 | break; | |
642 | ||
83ccf69d LPC |
643 | case CPU_JZRISC: |
644 | tlbw(p); | |
645 | uasm_i_nop(p); | |
646 | break; | |
647 | ||
1da177e4 LT |
648 | default: |
649 | panic("No TLB refill handler yet (CPU type: %d)", | |
650 | current_cpu_data.cputype); | |
651 | break; | |
652 | } | |
653 | } | |
654 | ||
6dd9344c DD |
655 | static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p, |
656 | unsigned int reg) | |
fd062c84 | 657 | { |
05857c64 | 658 | if (cpu_has_rixi) { |
748e787e | 659 | UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL)); |
6dd9344c DD |
660 | } else { |
661 | #ifdef CONFIG_64BIT_PHYS_ADDR | |
3be6022c | 662 | uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL)); |
6dd9344c DD |
663 | #else |
664 | UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL)); | |
665 | #endif | |
666 | } | |
667 | } | |
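/*
 * Why a rotate rather than a shift in the RIXI case: the software
 * no-exec/no-read bits live just below _PAGE_GLOBAL in the PTE, so a
 * plain right shift would throw them away. Rotating right by
 * ilog2(_PAGE_GLOBAL) strips the low software bits while wrapping the
 * RI/XI bits around into the top of EntryLo where the hardware expects
 * them. (A sketch of the intent; the exact bit positions depend on the
 * configured PTE layout.)
 */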
fd062c84 | 668 | |
aa1762f4 | 669 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT |
fd062c84 | 670 | |
6dd9344c DD |
671 | static __cpuinit void build_restore_pagemask(u32 **p, |
672 | struct uasm_reloc **r, | |
673 | unsigned int tmp, | |
2c8c53e2 DD |
674 | enum label_id lid, |
675 | int restore_scratch) | |
6dd9344c | 676 | { |
2c8c53e2 DD |
677 | if (restore_scratch) { |
678 | /* Reset default page size */ | |
679 | if (PM_DEFAULT_MASK >> 16) { | |
680 | uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); | |
681 | uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); | |
682 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); | |
683 | uasm_il_b(p, r, lid); | |
684 | } else if (PM_DEFAULT_MASK) { | |
685 | uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); | |
686 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); | |
687 | uasm_il_b(p, r, lid); | |
688 | } else { | |
689 | uasm_i_mtc0(p, 0, C0_PAGEMASK); | |
690 | uasm_il_b(p, r, lid); | |
691 | } | |
692 | if (scratch_reg > 0) | |
693 | UASM_i_MFC0(p, 1, 31, scratch_reg); | |
694 | else | |
695 | UASM_i_LW(p, 1, scratchpad_offset(0), 0); | |
fd062c84 | 696 | } else { |
2c8c53e2 DD |
697 | /* Reset default page size */ |
698 | if (PM_DEFAULT_MASK >> 16) { | |
699 | uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); | |
700 | uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); | |
701 | uasm_il_b(p, r, lid); | |
702 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); | |
703 | } else if (PM_DEFAULT_MASK) { | |
704 | uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); | |
705 | uasm_il_b(p, r, lid); | |
706 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); | |
707 | } else { | |
708 | uasm_il_b(p, r, lid); | |
709 | uasm_i_mtc0(p, 0, C0_PAGEMASK); | |
710 | } | |
fd062c84 DD |
711 | } |
712 | } | |
713 | ||
6dd9344c DD |
714 | static __cpuinit void build_huge_tlb_write_entry(u32 **p, |
715 | struct uasm_label **l, | |
716 | struct uasm_reloc **r, | |
717 | unsigned int tmp, | |
2c8c53e2 DD |
718 | enum tlb_write_entry wmode, |
719 | int restore_scratch) | |
6dd9344c DD |
720 | { |
721 | /* Set huge page tlb entry size */ | |
722 | uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16); | |
723 | uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff); | |
724 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); | |
725 | ||
726 | build_tlb_write_entry(p, l, r, wmode); | |
727 | ||
2c8c53e2 | 728 | build_restore_pagemask(p, r, tmp, label_leave, restore_scratch); |
6dd9344c DD |
729 | } |
730 | ||
fd062c84 DD |
731 | /* |
732 | * Check if Huge PTE is present, if so then jump to LABEL. | |
733 | */ | |
734 | static void __cpuinit | |
735 | build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp, | |
736 | unsigned int pmd, int lid) | |
737 | { | |
738 | UASM_i_LW(p, tmp, 0, pmd); | |
cc33ae43 DD |
739 | if (use_bbit_insns()) { |
740 | uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid); | |
741 | } else { | |
742 | uasm_i_andi(p, tmp, tmp, _PAGE_HUGE); | |
743 | uasm_il_bnez(p, r, tmp, lid); | |
744 | } | |
fd062c84 DD |
745 | } |
746 | ||
747 | static __cpuinit void build_huge_update_entries(u32 **p, | |
748 | unsigned int pte, | |
749 | unsigned int tmp) | |
750 | { | |
751 | int small_sequence; | |
752 | ||
753 | /* | |
754 | * A huge PTE describes an area the size of the | |
755 | * configured huge page size. This is twice the size | |
756 | * of the large TLB entry we intend to use. | |
757 | * A TLB entry half the size of the configured | |
758 | * huge page size is configured into entrylo0 | |
759 | * and entrylo1 to cover the contiguous huge PTE | |
760 | * address space. | |
761 | */ | |
762 | small_sequence = (HPAGE_SIZE >> 7) < 0x10000; | |
763 | ||
764 | /* We can clobber tmp. It isn't used after this.*/ | |
765 | if (!small_sequence) | |
766 | uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16)); | |
767 | ||
6dd9344c | 768 | build_convert_pte_to_entrylo(p, pte); |
9b8c3891 | 769 | UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */ |
fd062c84 DD |
770 | /* convert to entrylo1 */ |
771 | if (small_sequence) | |
772 | UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7); | |
773 | else | |
774 | UASM_i_ADDU(p, pte, pte, tmp); | |
775 | ||
9b8c3891 | 776 | UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */ |
fd062c84 DD |
777 | } |
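/*
 * Worked example, assuming 2 MB huge pages (HPAGE_SIZE == 1 << 21):
 * entrylo0 and entrylo1 each map 1 MB, and the second half of the huge
 * page starts HPAGE_SIZE / 2 bytes later. In EntryLo format (roughly
 * physical address >> 6 after the conversion above) that difference is
 * HPAGE_SIZE >> 7 == 0x4000, which fits in a 16-bit immediate, so
 * small_sequence is true and a single ADDIU derives entrylo1 from
 * entrylo0.
 */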
778 | ||
779 | static __cpuinit void build_huge_handler_tail(u32 **p, | |
780 | struct uasm_reloc **r, | |
781 | struct uasm_label **l, | |
782 | unsigned int pte, | |
783 | unsigned int ptr) | |
784 | { | |
785 | #ifdef CONFIG_SMP | |
786 | UASM_i_SC(p, pte, 0, ptr); | |
787 | uasm_il_beqz(p, r, pte, label_tlb_huge_update); | |
788 | UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */ | |
789 | #else | |
790 | UASM_i_SW(p, pte, 0, ptr); | |
791 | #endif | |
792 | build_huge_update_entries(p, pte, ptr); | |
2c8c53e2 | 793 | build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0); |
fd062c84 | 794 | } |
aa1762f4 | 795 | #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ |
fd062c84 | 796 | |
875d43e7 | 797 | #ifdef CONFIG_64BIT |
1da177e4 LT |
798 | /* |
799 | * TMP and PTR are scratch. | |
800 | * TMP will be clobbered, PTR will hold the pmd entry. | |
801 | */ | |
234fcd14 | 802 | static void __cpuinit |
e30ec452 | 803 | build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, |
1da177e4 LT |
804 | unsigned int tmp, unsigned int ptr) |
805 | { | |
82622284 | 806 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
1da177e4 | 807 | long pgdc = (long)pgd_current; |
82622284 | 808 | #endif |
1da177e4 LT |
809 | /* |
810 | * The vmalloc handling is not in the hotpath. | |
811 | */ | |
e30ec452 | 812 | uasm_i_dmfc0(p, tmp, C0_BADVADDR); |
1ec56329 DD |
813 | |
814 | if (check_for_high_segbits) { | |
815 | /* | |
816 | * The kernel currently implicitly assumes that the | |
817 | * MIPS SEGBITS parameter for the processor is | |
818 | * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never | |
819 | * allocate virtual addresses outside the maximum | |
820 | * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But | |
821 | * that doesn't prevent user code from accessing the | |
822 | * higher xuseg addresses. Here, we make sure that | |
823 | * everything but the lower xuseg addresses goes down | |
824 | * the module_alloc/vmalloc path. | |
825 | */ | |
826 | uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); | |
827 | uasm_il_bnez(p, r, ptr, label_vmalloc); | |
828 | } else { | |
829 | uasm_il_bltz(p, r, tmp, label_vmalloc); | |
830 | } | |
e30ec452 | 831 | /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */ |
1da177e4 | 832 | |
82622284 | 833 | #ifdef CONFIG_MIPS_PGD_C0_CONTEXT |
3d8bfdd0 DD |
834 | if (pgd_reg != -1) { |
835 | /* pgd is in pgd_reg */ | |
836 | UASM_i_MFC0(p, ptr, 31, pgd_reg); | |
837 | } else { | |
838 | /* | |
839 | * &pgd << 11 stored in CONTEXT [23..63]. | |
840 | */ | |
841 | UASM_i_MFC0(p, ptr, C0_CONTEXT); | |
842 | ||
843 | /* Clear lower 23 bits of context. */ | |
844 | uasm_i_dins(p, ptr, 0, 0, 23); | |
845 | ||
846 | /* 1 0 1 0 1 << 6 xkphys cached */ | |
847 | uasm_i_ori(p, ptr, ptr, 0x540); | |
848 | uasm_i_drotr(p, ptr, ptr, 11); | |
849 | } | |
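/*
 * Worked example of the CONTEXT trick above: with (pgd << 11) held in
 * bits 23..63 and the lower 23 bits cleared, the ori sets bits 10, 8
 * and 6 (the "1 0 1 0 1 << 6" pattern), and the drotr by 11 rotates
 * them up to bits 63, 61 and 59, forming the xkphys cached prefix,
 * while (pgd << 11) rotates back down to the original pgd pointer
 * value.
 */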
82622284 | 850 | #elif defined(CONFIG_SMP) |
41c594ab RB |
851 | # ifdef CONFIG_MIPS_MT_SMTC |
852 | /* | |
853 | * SMTC uses TCBind value as "CPU" index | |
854 | */ | |
e30ec452 | 855 | uasm_i_mfc0(p, ptr, C0_TCBIND); |
3be6022c | 856 | uasm_i_dsrl_safe(p, ptr, ptr, 19); |
41c594ab | 857 | # else |
1da177e4 | 858 | /* |
1b3a6e97 | 859 | * 64 bit SMP running in XKPHYS has smp_processor_id() << 3 |
1da177e4 LT |
860 | * stored in CONTEXT. |
861 | */ | |
e30ec452 | 862 | uasm_i_dmfc0(p, ptr, C0_CONTEXT); |
3be6022c | 863 | uasm_i_dsrl_safe(p, ptr, ptr, 23); |
82622284 | 864 | # endif |
e30ec452 TS |
865 | UASM_i_LA_mostly(p, tmp, pgdc); |
866 | uasm_i_daddu(p, ptr, ptr, tmp); | |
867 | uasm_i_dmfc0(p, tmp, C0_BADVADDR); | |
868 | uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); | |
1da177e4 | 869 | #else |
e30ec452 TS |
870 | UASM_i_LA_mostly(p, ptr, pgdc); |
871 | uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); | |
1da177e4 LT |
872 | #endif |
873 | ||
e30ec452 | 874 | uasm_l_vmalloc_done(l, *p); |
242954b5 | 875 | |
3be6022c DD |
876 | /* get pgd offset in bytes */ |
877 | uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3); | |
e30ec452 TS |
878 | |
879 | uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3); | |
880 | uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */ | |
325f8a0a | 881 | #ifndef __PAGETABLE_PMD_FOLDED |
e30ec452 TS |
882 | uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */ |
883 | uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */ | |
3be6022c | 884 | uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */ |
e30ec452 TS |
885 | uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3); |
886 | uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */ | |
325f8a0a | 887 | #endif |
1da177e4 LT |
888 | } |
889 | ||
890 | /* | |
891 | * BVADDR is the faulting address, PTR is scratch. | |
892 | * PTR will hold the pgd for vmalloc. | |
893 | */ | |
234fcd14 | 894 | static void __cpuinit |
e30ec452 | 895 | build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, |
1ec56329 DD |
896 | unsigned int bvaddr, unsigned int ptr, |
897 | enum vmalloc64_mode mode) | |
1da177e4 LT |
898 | { |
899 | long swpd = (long)swapper_pg_dir; | |
1ec56329 DD |
900 | int single_insn_swpd; |
901 | int did_vmalloc_branch = 0; | |
902 | ||
903 | single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd); | |
1da177e4 | 904 | |
e30ec452 | 905 | uasm_l_vmalloc(l, *p); |
1da177e4 | 906 | |
2c8c53e2 | 907 | if (mode != not_refill && check_for_high_segbits) { |
1ec56329 DD |
908 | if (single_insn_swpd) { |
909 | uasm_il_bltz(p, r, bvaddr, label_vmalloc_done); | |
910 | uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); | |
911 | did_vmalloc_branch = 1; | |
912 | /* fall through */ | |
913 | } else { | |
914 | uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault); | |
915 | } | |
916 | } | |
917 | if (!did_vmalloc_branch) { | |
918 | if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) { | |
919 | uasm_il_b(p, r, label_vmalloc_done); | |
920 | uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); | |
921 | } else { | |
922 | UASM_i_LA_mostly(p, ptr, swpd); | |
923 | uasm_il_b(p, r, label_vmalloc_done); | |
924 | if (uasm_in_compat_space_p(swpd)) | |
925 | uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd)); | |
926 | else | |
927 | uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd)); | |
928 | } | |
929 | } | |
2c8c53e2 | 930 | if (mode != not_refill && check_for_high_segbits) { |
1ec56329 DD |
931 | uasm_l_large_segbits_fault(l, *p); |
932 | /* | |
933 | * We get here if we are an xsseg address, or if we are | |
934 | * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary. | |
935 | * | |
936 | * Ignoring xsseg (assumed disabled, so it would generate | |
937 | * address errors?), the only remaining possibility | |
938 | * is the upper xuseg addresses. On processors with | |
939 | * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these | |
940 | * addresses would have taken an address error. We try | |
941 | * to mimic that here by taking a load/istream page | |
942 | * fault. | |
943 | */ | |
944 | UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); | |
945 | uasm_i_jr(p, ptr); | |
2c8c53e2 DD |
946 | |
947 | if (mode == refill_scratch) { | |
948 | if (scratch_reg > 0) | |
949 | UASM_i_MFC0(p, 1, 31, scratch_reg); | |
950 | else | |
951 | UASM_i_LW(p, 1, scratchpad_offset(0), 0); | |
952 | } else { | |
953 | uasm_i_nop(p); | |
954 | } | |
1da177e4 LT |
955 | } |
956 | } | |
957 | ||
875d43e7 | 958 | #else /* !CONFIG_64BIT */ |
1da177e4 LT |
959 | |
960 | /* | |
961 | * TMP and PTR are scratch. | |
962 | * TMP will be clobbered, PTR will hold the pgd entry. | |
963 | */ | |
234fcd14 | 964 | static void __cpuinit __maybe_unused |
1da177e4 LT |
965 | build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) |
966 | { | |
967 | long pgdc = (long)pgd_current; | |
968 | ||
969 | /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */ | |
970 | #ifdef CONFIG_SMP | |
41c594ab RB |
971 | #ifdef CONFIG_MIPS_MT_SMTC |
972 | /* | |
973 | * SMTC uses TCBind value as "CPU" index | |
974 | */ | |
e30ec452 TS |
975 | uasm_i_mfc0(p, ptr, C0_TCBIND); |
976 | UASM_i_LA_mostly(p, tmp, pgdc); | |
977 | uasm_i_srl(p, ptr, ptr, 19); | |
41c594ab RB |
978 | #else |
979 | /* | |
980 | * smp_processor_id() << 3 is stored in CONTEXT. | |
981 | */ | |
e30ec452 TS |
982 | uasm_i_mfc0(p, ptr, C0_CONTEXT); |
983 | UASM_i_LA_mostly(p, tmp, pgdc); | |
984 | uasm_i_srl(p, ptr, ptr, 23); | |
41c594ab | 985 | #endif |
e30ec452 | 986 | uasm_i_addu(p, ptr, tmp, ptr); |
1da177e4 | 987 | #else |
e30ec452 | 988 | UASM_i_LA_mostly(p, ptr, pgdc); |
1da177e4 | 989 | #endif |
e30ec452 TS |
990 | uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ |
991 | uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); | |
ff401e52 SH |
992 | |
993 | if (cpu_has_mips_r2) { | |
994 | uasm_i_ext(p, tmp, tmp, PGDIR_SHIFT, (32 - PGDIR_SHIFT)); | |
995 | uasm_i_ins(p, ptr, tmp, PGD_T_LOG2, (32 - PGDIR_SHIFT)); | |
996 | return; | |
997 | } | |
998 | ||
e30ec452 TS |
999 | uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */ |
1000 | uasm_i_sll(p, tmp, tmp, PGD_T_LOG2); | |
1001 | uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */ | |
1da177e4 LT |
1002 | } |
1003 | ||
875d43e7 | 1004 | #endif /* !CONFIG_64BIT */ |
1da177e4 | 1005 | |
234fcd14 | 1006 | static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx) |
1da177e4 | 1007 | { |
242954b5 | 1008 | unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; |
1da177e4 LT |
1009 | unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); |
1010 | ||
10cc3529 | 1011 | switch (current_cpu_type()) { |
1da177e4 LT |
1012 | case CPU_VR41XX: |
1013 | case CPU_VR4111: | |
1014 | case CPU_VR4121: | |
1015 | case CPU_VR4122: | |
1016 | case CPU_VR4131: | |
1017 | case CPU_VR4181: | |
1018 | case CPU_VR4181A: | |
1019 | case CPU_VR4133: | |
1020 | shift += 2; | |
1021 | break; | |
1022 | ||
1023 | default: | |
1024 | break; | |
1025 | } | |
1026 | ||
1027 | if (shift) | |
e30ec452 TS |
1028 | UASM_i_SRL(p, ctx, ctx, shift); |
1029 | uasm_i_andi(p, ctx, ctx, mask); | |
1da177e4 LT |
1030 | } |
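/*
 * Worked example, assuming 4K pages and 32-bit PTEs (PAGE_SHIFT == 12,
 * PTE_T_LOG2 == 2): shift == 4 - 3 + 12 - 12 == 1, which turns the
 * BadVPN2 field of c0_context (VPN2 << 4) into the byte offset of an
 * even/odd PTE pair (VPN2 << 3), and mask == (512 - 1) << 3 == 0xff8
 * confines that offset to the 1024-entry page table page.
 */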
1031 | ||
234fcd14 | 1032 | static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) |
1da177e4 | 1033 | { |
ff401e52 SH |
1034 | if (cpu_has_mips_r2) { |
1035 | /* PTE ptr offset is obtained from BadVAddr */ | |
1036 | UASM_i_MFC0(p, tmp, C0_BADVADDR); | |
1037 | UASM_i_LW(p, ptr, 0, ptr); | |
1038 | uasm_i_ext(p, tmp, tmp, PAGE_SHIFT+1, PGDIR_SHIFT-PAGE_SHIFT-1); | |
1039 | uasm_i_ins(p, ptr, tmp, PTE_T_LOG2+1, PGDIR_SHIFT-PAGE_SHIFT-1); | |
1040 | return; | |
1041 | } | |
1042 | ||
1da177e4 LT |
1043 | /* |
1044 | * Bug workaround for the Nevada. It seems as if under certain | |
1045 | * circumstances the move from cp0_context might produce a | |
1046 | * bogus result when the mfc0 instruction and its consumer are | |
1047 | * in different cachelines, or when a load instruction (probably | |
1048 | * any memory reference) is between them. | |
1049 | */ | |
10cc3529 | 1050 | switch (current_cpu_type()) { |
1da177e4 | 1051 | case CPU_NEVADA: |
e30ec452 | 1052 | UASM_i_LW(p, ptr, 0, ptr); |
1da177e4 LT |
1053 | GET_CONTEXT(p, tmp); /* get context reg */ |
1054 | break; | |
1055 | ||
1056 | default: | |
1057 | GET_CONTEXT(p, tmp); /* get context reg */ | |
e30ec452 | 1058 | UASM_i_LW(p, ptr, 0, ptr); |
1da177e4 LT |
1059 | break; |
1060 | } | |
1061 | ||
1062 | build_adjust_context(p, tmp); | |
e30ec452 | 1063 | UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ |
1da177e4 LT |
1064 | } |
1065 | ||
234fcd14 | 1066 | static void __cpuinit build_update_entries(u32 **p, unsigned int tmp, |
1da177e4 LT |
1067 | unsigned int ptep) |
1068 | { | |
1069 | /* | |
1070 | * 64bit address support (36bit on a 32bit CPU) in a 32bit | |
1071 | * Kernel is a special case. Only a few CPUs use it. | |
1072 | */ | |
1073 | #ifdef CONFIG_64BIT_PHYS_ADDR | |
1074 | if (cpu_has_64bits) { | |
e30ec452 TS |
1075 | uasm_i_ld(p, tmp, 0, ptep); /* get even pte */ |
1076 | uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ | |
05857c64 | 1077 | if (cpu_has_rixi) { |
748e787e | 1078 | UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); |
6dd9344c | 1079 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ |
748e787e | 1080 | UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); |
6dd9344c | 1081 | } else { |
3be6022c | 1082 | uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */ |
6dd9344c | 1083 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ |
3be6022c | 1084 | uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ |
6dd9344c | 1085 | } |
9b8c3891 | 1086 | UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ |
1da177e4 LT |
1087 | } else { |
1088 | int pte_off_even = sizeof(pte_t) / 2; | |
1089 | int pte_off_odd = pte_off_even + sizeof(pte_t); | |
1090 | ||
1091 | /* The pte entries are pre-shifted */ | |
e30ec452 | 1092 | uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */ |
9b8c3891 | 1093 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ |
e30ec452 | 1094 | uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */ |
9b8c3891 | 1095 | UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ |
1da177e4 LT |
1096 | } |
1097 | #else | |
e30ec452 TS |
1098 | UASM_i_LW(p, tmp, 0, ptep); /* get even pte */ |
1099 | UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ | |
1da177e4 LT |
1100 | if (r45k_bvahwbug()) |
1101 | build_tlb_probe_entry(p); | |
05857c64 | 1102 | if (cpu_has_rixi) { |
748e787e | 1103 | UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); |
6dd9344c DD |
1104 | if (r4k_250MHZhwbug()) |
1105 | UASM_i_MTC0(p, 0, C0_ENTRYLO0); | |
1106 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ | |
748e787e | 1107 | UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); |
6dd9344c DD |
1108 | } else { |
1109 | UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */ | |
1110 | if (r4k_250MHZhwbug()) | |
1111 | UASM_i_MTC0(p, 0, C0_ENTRYLO0); | |
1112 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ | |
1113 | UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ | |
1114 | if (r45k_bvahwbug()) | |
1115 | uasm_i_mfc0(p, tmp, C0_INDEX); | |
1116 | } | |
1da177e4 | 1117 | if (r4k_250MHZhwbug()) |
9b8c3891 DD |
1118 | UASM_i_MTC0(p, 0, C0_ENTRYLO1); |
1119 | UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ | |
1da177e4 LT |
1120 | #endif |
1121 | } | |
1122 | ||
2c8c53e2 DD |
1123 | struct mips_huge_tlb_info { |
1124 | int huge_pte; | |
1125 | int restore_scratch; | |
1126 | }; | |
1127 | ||
1128 | static struct mips_huge_tlb_info __cpuinit | |
1129 | build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, | |
1130 | struct uasm_reloc **r, unsigned int tmp, | |
1131 | unsigned int ptr, int c0_scratch) | |
1132 | { | |
1133 | struct mips_huge_tlb_info rv; | |
1134 | unsigned int even, odd; | |
1135 | int vmalloc_branch_delay_filled = 0; | |
1136 | const int scratch = 1; /* Our extra working register */ | |
1137 | ||
1138 | rv.huge_pte = scratch; | |
1139 | rv.restore_scratch = 0; | |
1140 | ||
1141 | if (check_for_high_segbits) { | |
1142 | UASM_i_MFC0(p, tmp, C0_BADVADDR); | |
1143 | ||
1144 | if (pgd_reg != -1) | |
1145 | UASM_i_MFC0(p, ptr, 31, pgd_reg); | |
1146 | else | |
1147 | UASM_i_MFC0(p, ptr, C0_CONTEXT); | |
1148 | ||
1149 | if (c0_scratch >= 0) | |
1150 | UASM_i_MTC0(p, scratch, 31, c0_scratch); | |
1151 | else | |
1152 | UASM_i_SW(p, scratch, scratchpad_offset(0), 0); | |
1153 | ||
1154 | uasm_i_dsrl_safe(p, scratch, tmp, | |
1155 | PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); | |
1156 | uasm_il_bnez(p, r, scratch, label_vmalloc); | |
1157 | ||
1158 | if (pgd_reg == -1) { | |
1159 | vmalloc_branch_delay_filled = 1; | |
1160 | /* Clear lower 23 bits of context. */ | |
1161 | uasm_i_dins(p, ptr, 0, 0, 23); | |
1162 | } | |
1163 | } else { | |
1164 | if (pgd_reg != -1) | |
1165 | UASM_i_MFC0(p, ptr, 31, pgd_reg); | |
1166 | else | |
1167 | UASM_i_MFC0(p, ptr, C0_CONTEXT); | |
1168 | ||
1169 | UASM_i_MFC0(p, tmp, C0_BADVADDR); | |
1170 | ||
1171 | if (c0_scratch >= 0) | |
1172 | UASM_i_MTC0(p, scratch, 31, c0_scratch); | |
1173 | else | |
1174 | UASM_i_SW(p, scratch, scratchpad_offset(0), 0); | |
1175 | ||
1176 | if (pgd_reg == -1) | |
1177 | /* Clear lower 23 bits of context. */ | |
1178 | uasm_i_dins(p, ptr, 0, 0, 23); | |
1179 | ||
1180 | uasm_il_bltz(p, r, tmp, label_vmalloc); | |
1181 | } | |
1182 | ||
1183 | if (pgd_reg == -1) { | |
1184 | vmalloc_branch_delay_filled = 1; | |
1185 | /* 1 0 1 0 1 << 6 xkphys cached */ | |
1186 | uasm_i_ori(p, ptr, ptr, 0x540); | |
1187 | uasm_i_drotr(p, ptr, ptr, 11); | |
1188 | } | |
1189 | ||
1190 | #ifdef __PAGETABLE_PMD_FOLDED | |
1191 | #define LOC_PTEP scratch | |
1192 | #else | |
1193 | #define LOC_PTEP ptr | |
1194 | #endif | |
1195 | ||
1196 | if (!vmalloc_branch_delay_filled) | |
1197 | /* get pgd offset in bytes */ | |
1198 | uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3); | |
1199 | ||
1200 | uasm_l_vmalloc_done(l, *p); | |
1201 | ||
1202 | /* | |
1203 | * tmp ptr | |
1204 | * fall-through case = badvaddr *pgd_current | |
1205 | * vmalloc case = badvaddr swapper_pg_dir | |
1206 | */ | |
1207 | ||
1208 | if (vmalloc_branch_delay_filled) | |
1209 | /* get pgd offset in bytes */ | |
1210 | uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3); | |
1211 | ||
1212 | #ifdef __PAGETABLE_PMD_FOLDED | |
1213 | GET_CONTEXT(p, tmp); /* get context reg */ | |
1214 | #endif | |
1215 | uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3); | |
1216 | ||
1217 | if (use_lwx_insns()) { | |
1218 | UASM_i_LWX(p, LOC_PTEP, scratch, ptr); | |
1219 | } else { | |
1220 | uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */ | |
1221 | uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */ | |
1222 | } | |
1223 | ||
1224 | #ifndef __PAGETABLE_PMD_FOLDED | |
1225 | /* get pmd offset in bytes */ | |
1226 | uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3); | |
1227 | uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3); | |
1228 | GET_CONTEXT(p, tmp); /* get context reg */ | |
1229 | ||
1230 | if (use_lwx_insns()) { | |
1231 | UASM_i_LWX(p, scratch, scratch, ptr); | |
1232 | } else { | |
1233 | uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */ | |
1234 | UASM_i_LW(p, scratch, 0, ptr); | |
1235 | } | |
1236 | #endif | |
1237 | /* Adjust the context during the load latency. */ | |
1238 | build_adjust_context(p, tmp); | |
1239 | ||
aa1762f4 | 1240 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT |
2c8c53e2 DD |
1241 | uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update); |
1242 | /* | |
1243 | * In the LWX case we don't want to do the load in the | |
1244 | * delay slot. It cannot issue in the same cycle and may be | |
1245 | * speculative and unneeded. | |
1246 | */ | |
1247 | if (use_lwx_insns()) | |
1248 | uasm_i_nop(p); | |
aa1762f4 | 1249 | #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ |
2c8c53e2 DD |
1250 | |
1251 | ||
1252 | /* build_update_entries */ | |
1253 | if (use_lwx_insns()) { | |
1254 | even = ptr; | |
1255 | odd = tmp; | |
1256 | UASM_i_LWX(p, even, scratch, tmp); | |
1257 | UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t)); | |
1258 | UASM_i_LWX(p, odd, scratch, tmp); | |
1259 | } else { | |
1260 | UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */ | |
1261 | even = tmp; | |
1262 | odd = ptr; | |
1263 | UASM_i_LW(p, even, 0, ptr); /* get even pte */ | |
1264 | UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */ | |
1265 | } | |
05857c64 | 1266 | if (cpu_has_rixi) { |
748e787e | 1267 | uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL)); |
2c8c53e2 | 1268 | UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ |
748e787e | 1269 | uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL)); |
2c8c53e2 DD |
1270 | } else { |
1271 | uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL)); | |
1272 | UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ | |
1273 | uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL)); | |
1274 | } | |
1275 | UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */ | |
1276 | ||
1277 | if (c0_scratch >= 0) { | |
1278 | UASM_i_MFC0(p, scratch, 31, c0_scratch); | |
1279 | build_tlb_write_entry(p, l, r, tlb_random); | |
1280 | uasm_l_leave(l, *p); | |
1281 | rv.restore_scratch = 1; | |
1282 | } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) { | |
1283 | build_tlb_write_entry(p, l, r, tlb_random); | |
1284 | uasm_l_leave(l, *p); | |
1285 | UASM_i_LW(p, scratch, scratchpad_offset(0), 0); | |
1286 | } else { | |
1287 | UASM_i_LW(p, scratch, scratchpad_offset(0), 0); | |
1288 | build_tlb_write_entry(p, l, r, tlb_random); | |
1289 | uasm_l_leave(l, *p); | |
1290 | rv.restore_scratch = 1; | |
1291 | } | |
1292 | ||
1293 | uasm_i_eret(p); /* return from trap */ | |
1294 | ||
1295 | return rv; | |
1296 | } | |
1297 | ||
e6f72d3a DD |
1298 | /* |
1299 | * For a 64-bit kernel, we are using the 64-bit XTLB refill exception | |
1300 | * because EXL == 0. If we wrap, we can also use the 32 instruction | |
1301 | * slots before the XTLB refill exception handler which belong to the | |
1302 | * unused TLB refill exception. | |
1303 | */ | |
1304 | #define MIPS64_REFILL_INSNS 32 | |
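/*
 * Layout sketch for the fold done below: final_handler mirrors the
 * exception vector area, entries 0..31 corresponding to the unused TLB
 * refill vector at ebase and entries 32..63 to the XTLB refill vector
 * at ebase + 0x80. If the handler exceeds 32 instructions it is split:
 * the head stays at the XTLB vector and branches back to the tail,
 * which wraps around into the unused first 32 slots.
 */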
1305 | ||
234fcd14 | 1306 | static void __cpuinit build_r4000_tlb_refill_handler(void) |
1da177e4 LT |
1307 | { |
1308 | u32 *p = tlb_handler; | |
e30ec452 TS |
1309 | struct uasm_label *l = labels; |
1310 | struct uasm_reloc *r = relocs; | |
1da177e4 LT |
1311 | u32 *f; |
1312 | unsigned int final_len; | |
4a9040f4 RB |
1313 | struct mips_huge_tlb_info htlb_info __maybe_unused; |
1314 | enum vmalloc64_mode vmalloc_mode __maybe_unused; | |
1da177e4 LT |
1315 | |
1316 | memset(tlb_handler, 0, sizeof(tlb_handler)); | |
1317 | memset(labels, 0, sizeof(labels)); | |
1318 | memset(relocs, 0, sizeof(relocs)); | |
1319 | memset(final_handler, 0, sizeof(final_handler)); | |
1320 | ||
2c8c53e2 DD |
1321 | if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) { |
1322 | htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, | |
1323 | scratch_reg); | |
1324 | vmalloc_mode = refill_scratch; | |
1325 | } else { | |
1326 | htlb_info.huge_pte = K0; | |
1327 | htlb_info.restore_scratch = 0; | |
1328 | vmalloc_mode = refill_noscratch; | |
1329 | /* | |
1330 | * create the plain linear handler | |
1331 | */ | |
1332 | if (bcm1250_m3_war()) { | |
1333 | unsigned int segbits = 44; | |
1334 | ||
1335 | uasm_i_dmfc0(&p, K0, C0_BADVADDR); | |
1336 | uasm_i_dmfc0(&p, K1, C0_ENTRYHI); | |
1337 | uasm_i_xor(&p, K0, K0, K1); | |
1338 | uasm_i_dsrl_safe(&p, K1, K0, 62); | |
1339 | uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); | |
1340 | uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); | |
1341 | uasm_i_or(&p, K0, K0, K1); | |
1342 | uasm_il_bnez(&p, &r, K0, label_leave); | |
1343 | /* No need for uasm_i_nop */ | |
1344 | } | |
1da177e4 | 1345 | |
875d43e7 | 1346 | #ifdef CONFIG_64BIT |
2c8c53e2 | 1347 | build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ |
1da177e4 | 1348 | #else |
2c8c53e2 | 1349 | build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ |
1da177e4 LT |
1350 | #endif |
1351 | ||
aa1762f4 | 1352 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT |
2c8c53e2 | 1353 | build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); |
fd062c84 DD |
1354 | #endif |
1355 | ||
2c8c53e2 DD |
1356 | build_get_ptep(&p, K0, K1); |
1357 | build_update_entries(&p, K0, K1); | |
1358 | build_tlb_write_entry(&p, &l, &r, tlb_random); | |
1359 | uasm_l_leave(&l, p); | |
1360 | uasm_i_eret(&p); /* return from trap */ | |
1361 | } | |
aa1762f4 | 1362 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT |
fd062c84 | 1363 | uasm_l_tlb_huge_update(&l, p); |
2c8c53e2 DD |
1364 | build_huge_update_entries(&p, htlb_info.huge_pte, K1); |
1365 | build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, | |
1366 | htlb_info.restore_scratch); | |
fd062c84 DD |
1367 | #endif |
1368 | ||
875d43e7 | 1369 | #ifdef CONFIG_64BIT |
2c8c53e2 | 1370 | build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode); |
1da177e4 LT |
1371 | #endif |
1372 | ||
1373 | /* | |
1374 | * Overflow check: For the 64bit handler, we need at least one | |
1375 | * free instruction slot for the wrap-around branch. In the worst | |
1376 | * case, if the intended insertion point is a delay slot, we | |
4b3f686d | 1377 | * need three, with the second nop'ed and the third being |
1da177e4 LT |
1378 | * unused. |
1379 | */ | |
2a21c730 FZ |
1380 | /* Loongson2 ebase is different from r4k, so we have more space */ |
1381 | #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) | |
1da177e4 LT |
1382 | if ((p - tlb_handler) > 64) |
1383 | panic("TLB refill handler space exceeded"); | |
1384 | #else | |
e6f72d3a DD |
1385 | if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) |
1386 | || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) | |
1387 | && uasm_insn_has_bdelay(relocs, | |
1388 | tlb_handler + MIPS64_REFILL_INSNS - 3))) | |
1da177e4 LT |
1389 | panic("TLB refill handler space exceeded"); |
1390 | #endif | |
1391 | ||
1392 | /* | |
1393 | * Now fold the handler in the TLB refill handler space. | |
1394 | */ | |
2a21c730 | 1395 | #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) |
1da177e4 LT |
1396 | f = final_handler; |
1397 | /* Simplest case, just copy the handler. */ | |
e30ec452 | 1398 | uasm_copy_handler(relocs, labels, tlb_handler, p, f); |
1da177e4 | 1399 | final_len = p - tlb_handler; |
875d43e7 | 1400 | #else /* CONFIG_64BIT */ |
e6f72d3a DD |
1401 | f = final_handler + MIPS64_REFILL_INSNS; |
1402 | if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { | |
1da177e4 | 1403 | /* Just copy the handler. */ |
e30ec452 | 1404 | uasm_copy_handler(relocs, labels, tlb_handler, p, f); |
1da177e4 LT |
1405 | final_len = p - tlb_handler; |
1406 | } else { | |
aa1762f4 | 1407 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT |
fd062c84 | 1408 | const enum label_id ls = label_tlb_huge_update; |
95affdda DD |
1409 | #else |
1410 | const enum label_id ls = label_vmalloc; | |
1411 | #endif | |
1412 | u32 *split; | |
1413 | int ov = 0; | |
1414 | int i; | |
1415 | ||
1416 | for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) | |
1417 | ; | |
1418 | BUG_ON(i == ARRAY_SIZE(labels)); | |
1419 | split = labels[i].addr; | |
1da177e4 LT |
1420 | |
1421 | /* | |
95affdda | 1422 | * See if we have overflowed one way or the other. |
1da177e4 | 1423 | */ |
95affdda DD |
1424 | if (split > tlb_handler + MIPS64_REFILL_INSNS || |
1425 | split < p - MIPS64_REFILL_INSNS) | |
1426 | ov = 1; | |
1427 | ||
1428 | if (ov) { | |
1429 | /* | |
1430 | * Split two instructions before the end. One | |
1431 | * for the branch and one for the instruction | |
1432 | * in the delay slot. | |
1433 | */ | |
1434 | split = tlb_handler + MIPS64_REFILL_INSNS - 2; | |
1435 | ||
1436 | /* | |
1437 | * If the branch would fall in a delay slot, | |
1438 | * we must back up an additional instruction | |
1439 | * so that it is no longer in a delay slot. | |
1440 | */ | |
1441 | if (uasm_insn_has_bdelay(relocs, split - 1)) | |
1442 | split--; | |
1443 | } | |
1da177e4 | 1444 | /* Copy first part of the handler. */ |
e30ec452 | 1445 | uasm_copy_handler(relocs, labels, tlb_handler, split, f); |
1da177e4 LT |
1446 | f += split - tlb_handler; |
1447 | ||
95affdda DD |
1448 | if (ov) { |
1449 | /* Insert branch. */ | |
1450 | uasm_l_split(&l, final_handler); | |
1451 | uasm_il_b(&f, &r, label_split); | |
1452 | if (uasm_insn_has_bdelay(relocs, split)) | |
1453 | uasm_i_nop(&f); | |
1454 | else { | |
1455 | uasm_copy_handler(relocs, labels, | |
1456 | split, split + 1, f); | |
1457 | uasm_move_labels(labels, f, f + 1, -1); | |
1458 | f++; | |
1459 | split++; | |
1460 | } | |
1da177e4 LT |
1461 | } |
1462 | ||
1463 | /* Copy the rest of the handler. */ | |
e30ec452 | 1464 | uasm_copy_handler(relocs, labels, split, p, final_handler); |
e6f72d3a DD |
1465 | final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + |
1466 | (p - split); | |
1da177e4 | 1467 | } |
875d43e7 | 1468 | #endif /* CONFIG_64BIT */ |
1da177e4 | 1469 | |
e30ec452 TS |
1470 | uasm_resolve_relocs(relocs, labels); |
1471 | pr_debug("Wrote TLB refill handler (%u instructions).\n", | |
1472 | final_len); | |
1da177e4 | 1473 | |
91b05e67 | 1474 | memcpy((void *)ebase, final_handler, 0x100); |
92b1e6a6 | 1475 | |
a2c763e0 | 1476 | dump_handler("r4000_tlb_refill", (u32 *)ebase, 64); |
1da177e4 LT |
1477 | } |
1478 | ||
1da177e4 LT |
1479 | /* |
1480 | * 128 instructions for the fastpath handler is generous and should | |
1481 | * never be exceeded. | |
1482 | */ | |
1483 | #define FASTPATH_SIZE 128 | |
1484 | ||
cbdbe07f FBH |
1485 | u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned; |
1486 | u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; | |
1487 | u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; | |
3d8bfdd0 DD |
1488 | #ifdef CONFIG_MIPS_PGD_C0_CONTEXT |
1489 | u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned; | |
1490 | ||
1491 | static void __cpuinit build_r4000_setup_pgd(void) | |
1492 | { | |
1493 | const int a0 = 4; | |
1494 | const int a1 = 5; | |
1495 | u32 *p = tlbmiss_handler_setup_pgd; | |
1496 | struct uasm_label *l = labels; | |
1497 | struct uasm_reloc *r = relocs; | |
1498 | ||
1499 | memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd)); | |
1500 | memset(labels, 0, sizeof(labels)); | |
1501 | memset(relocs, 0, sizeof(relocs)); | |
1502 | ||
1503 | pgd_reg = allocate_kscratch(); | |
1504 | ||
1505 | if (pgd_reg == -1) { | |
1506 | /* PGD << 11 in c0_Context */ | |
1507 | /* | |
1508 | * If it is a ckseg0 address, convert to a physical | |
1509 | * address. Shifting right by 29 and adding 4 will | |
1510 | * result in zero for these addresses. | |
1512 | */ |
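/*
 * E.g. for a CKSEG0 pgd at 0xffffffff80000000: shifting right
 * arithmetically by 29 gives -4, the addiu makes it 0, so the
 * bnez below falls through and the dinsm clears bits 63..29,
 * leaving the physical address.
 */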
1513 | UASM_i_SRA(&p, a1, a0, 29); | |
1514 | UASM_i_ADDIU(&p, a1, a1, 4); | |
1515 | uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1); | |
1516 | uasm_i_nop(&p); | |
1517 | uasm_i_dinsm(&p, a0, 0, 29, 64 - 29); | |
1518 | uasm_l_tlbl_goaround1(&l, p); | |
1519 | UASM_i_SLL(&p, a0, a0, 11); | |
1520 | uasm_i_jr(&p, 31); | |
1521 | UASM_i_MTC0(&p, a0, C0_CONTEXT); | |
1522 | } else { | |
1523 | /* PGD in c0_KScratch */ | |
1524 | uasm_i_jr(&p, 31); | |
1525 | UASM_i_MTC0(&p, a0, 31, pgd_reg); | |
1526 | } | |
1527 | if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd)) | |
1528 | panic("tlbmiss_handler_setup_pgd space exceeded"); | |
1529 | uasm_resolve_relocs(relocs, labels); | |
1530 | pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n", | |
1531 | (unsigned int)(p - tlbmiss_handler_setup_pgd)); | |
1532 | ||
a2c763e0 RB |
1533 | dump_handler("tlbmiss_handler", |
1534 | tlbmiss_handler_setup_pgd, | |
3d8bfdd0 DD |
1535 | ARRAY_SIZE(tlbmiss_handler_setup_pgd)); |
1536 | } | |
1537 | #endif | |
1da177e4 | 1538 | |
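/*
 * Load a PTE.  Under SMP a (doubleword) load-linked is used so that
 * the matching store-conditional in iPTE_SW can detect a concurrent
 * update of the page table entry.
 */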
234fcd14 | 1539 | static void __cpuinit |
bd1437e4 | 1540 | iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) |
1da177e4 LT |
1541 | { |
1542 | #ifdef CONFIG_SMP | |
1543 | # ifdef CONFIG_64BIT_PHYS_ADDR | |
1544 | if (cpu_has_64bits) | |
e30ec452 | 1545 | uasm_i_lld(p, pte, 0, ptr); |
1da177e4 LT |
1546 | else |
1547 | # endif | |
e30ec452 | 1548 | UASM_i_LL(p, pte, 0, ptr); |
1da177e4 LT |
1549 | #else |
1550 | # ifdef CONFIG_64BIT_PHYS_ADDR | |
1551 | if (cpu_has_64bits) | |
e30ec452 | 1552 | uasm_i_ld(p, pte, 0, ptr); |
1da177e4 LT |
1553 | else |
1554 | # endif | |
e30ec452 | 1555 | UASM_i_LW(p, pte, 0, ptr); |
1da177e4 LT |
1556 | #endif |
1557 | } | |
1558 | ||
234fcd14 | 1559 | static void __cpuinit |
e30ec452 | 1560 | iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, |
63b2d2f4 | 1561 | unsigned int mode) |
1da177e4 | 1562 | { |
63b2d2f4 TS |
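/*
 * With 64-bit physical addresses on a 32-bit CPU a pte spans two
 * words; hwmode picks out the bits the TLB hardware consumes
 * (_PAGE_VALID | _PAGE_DIRTY) so they can be set in the second
 * word of the pte as well.
 */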
1563 | #ifdef CONFIG_64BIT_PHYS_ADDR |
1564 | unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); | |
1565 | #endif | |
1566 | ||
e30ec452 | 1567 | uasm_i_ori(p, pte, pte, mode); |
1da177e4 LT |
1568 | #ifdef CONFIG_SMP |
1569 | # ifdef CONFIG_64BIT_PHYS_ADDR | |
1570 | if (cpu_has_64bits) | |
e30ec452 | 1571 | uasm_i_scd(p, pte, 0, ptr); |
1da177e4 LT |
1572 | else |
1573 | # endif | |
e30ec452 | 1574 | UASM_i_SC(p, pte, 0, ptr); |
1da177e4 LT |
1575 | |
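/*
 * Early R10000 silicon requires the branch after sc to be a
 * branch-likely (beqzl) for ll/sc sequences to work reliably.
 */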
1576 | if (r10000_llsc_war()) | |
e30ec452 | 1577 | uasm_il_beqzl(p, r, pte, label_smp_pgtable_change); |
1da177e4 | 1578 | else |
e30ec452 | 1579 | uasm_il_beqz(p, r, pte, label_smp_pgtable_change); |
1da177e4 LT |
1580 | |
1581 | # ifdef CONFIG_64BIT_PHYS_ADDR | |
1582 | if (!cpu_has_64bits) { | |
e30ec452 TS |
1583 | /* no uasm_i_nop needed */ |
1584 | uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr); | |
1585 | uasm_i_ori(p, pte, pte, hwmode); | |
1586 | uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr); | |
1587 | uasm_il_beqz(p, r, pte, label_smp_pgtable_change); | |
1588 | /* no uasm_i_nop needed */ | |
1589 | uasm_i_lw(p, pte, 0, ptr); | |
1da177e4 | 1590 | } else |
e30ec452 | 1591 | uasm_i_nop(p); |
1da177e4 | 1592 | # else |
e30ec452 | 1593 | uasm_i_nop(p); |
1da177e4 LT |
1594 | # endif |
1595 | #else | |
1596 | # ifdef CONFIG_64BIT_PHYS_ADDR | |
1597 | if (cpu_has_64bits) | |
e30ec452 | 1598 | uasm_i_sd(p, pte, 0, ptr); |
1da177e4 LT |
1599 | else |
1600 | # endif | |
e30ec452 | 1601 | UASM_i_SW(p, pte, 0, ptr); |
1da177e4 LT |
1602 | |
1603 | # ifdef CONFIG_64BIT_PHYS_ADDR | |
1604 | if (!cpu_has_64bits) { | |
e30ec452 TS |
1605 | uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr); |
1606 | uasm_i_ori(p, pte, pte, hwmode); | |
1607 | uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr); | |
1608 | uasm_i_lw(p, pte, 0, ptr); | |
1da177e4 LT |
1609 | } |
1610 | # endif | |
1611 | #endif | |
1612 | } | |
1613 | ||
1614 | /* | |
1615 | * Check if the PTE is present; if not, jump to LABEL. PTR points to |
1616 | * the page table where this PTE is located; the PTE will be reloaded |
1617 | * with its original value. |
1618 | */ | |
234fcd14 | 1619 | static void __cpuinit |
bd1437e4 | 1620 | build_pte_present(u32 **p, struct uasm_reloc **r, |
bf28607f | 1621 | int pte, int ptr, int scratch, enum label_id lid) |
1da177e4 | 1622 | { |
bf28607f DD |
1623 | int t = scratch >= 0 ? scratch : pte; |
1624 | ||
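/*
 * With RI/XI, read permission is expressed through the RI bit rather
 * than _PAGE_READ, so only the present bit is tested; otherwise the
 * andi/xori pair checks that both _PAGE_PRESENT and _PAGE_READ are
 * set in the pte.
 */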
05857c64 | 1625 | if (cpu_has_rixi) { |
cc33ae43 DD |
1626 | if (use_bbit_insns()) { |
1627 | uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid); | |
1628 | uasm_i_nop(p); | |
1629 | } else { | |
bf28607f DD |
1630 | uasm_i_andi(p, t, pte, _PAGE_PRESENT); |
1631 | uasm_il_beqz(p, r, t, lid); | |
1632 | if (pte == t) | |
1633 | /* You lose the SMP race :-( */ |
1634 | iPTE_LW(p, pte, ptr); | |
cc33ae43 | 1635 | } |
6dd9344c | 1636 | } else { |
bf28607f DD |
1637 | uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ); |
1638 | uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ); | |
1639 | uasm_il_bnez(p, r, t, lid); | |
1640 | if (pte == t) | |
1641 | /* You lose the SMP race :-( */ |
1642 | iPTE_LW(p, pte, ptr); | |
6dd9344c | 1643 | } |
1da177e4 LT |
1644 | } |
1645 | ||
1646 | /* Make PTE valid, store result in PTR. */ | |
234fcd14 | 1647 | static void __cpuinit |
e30ec452 | 1648 | build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, |
1da177e4 LT |
1649 | unsigned int ptr) |
1650 | { | |
63b2d2f4 TS |
1651 | unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED; |
1652 | ||
1653 | iPTE_SW(p, r, pte, ptr, mode); | |
1da177e4 LT |
1654 | } |
1655 | ||
1656 | /* | |
1657 | * Check if the PTE can be written to; if not, branch to LABEL. In |
1658 | * either case, restore the PTE with the value from PTR when done. |
1659 | */ | |
234fcd14 | 1660 | static void __cpuinit |
bd1437e4 | 1661 | build_pte_writable(u32 **p, struct uasm_reloc **r, |
bf28607f DD |
1662 | unsigned int pte, unsigned int ptr, int scratch, |
1663 | enum label_id lid) | |
1da177e4 | 1664 | { |
bf28607f DD |
1665 | int t = scratch >= 0 ? scratch : pte; |
1666 | ||
1667 | uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE); | |
1668 | uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE); | |
1669 | uasm_il_bnez(p, r, t, lid); | |
1670 | if (pte == t) | |
1671 | /* You lose the SMP race :-( */ |
cc33ae43 | 1672 | iPTE_LW(p, pte, ptr); |
bf28607f DD |
1673 | else |
1674 | uasm_i_nop(p); | |
1da177e4 LT |
1675 | } |
1676 | ||
1677 | /* Make PTE writable, update software status bits as well, then store | |
1678 | * at PTR. | |
1679 | */ | |
234fcd14 | 1680 | static void __cpuinit |
e30ec452 | 1681 | build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, |
1da177e4 LT |
1682 | unsigned int ptr) |
1683 | { | |
63b2d2f4 TS |
1684 | unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID |
1685 | | _PAGE_DIRTY); | |
1686 | ||
1687 | iPTE_SW(p, r, pte, ptr, mode); | |
1da177e4 LT |
1688 | } |
1689 | ||
1690 | /* | |
1691 | * Check if the PTE can be modified; if not, branch to LABEL. In |
1692 | * either case, restore the PTE with the value from PTR when done. |
1693 | */ | |
234fcd14 | 1694 | static void __cpuinit |
bd1437e4 | 1695 | build_pte_modifiable(u32 **p, struct uasm_reloc **r, |
bf28607f DD |
1696 | unsigned int pte, unsigned int ptr, int scratch, |
1697 | enum label_id lid) | |
1da177e4 | 1698 | { |
cc33ae43 DD |
1699 | if (use_bbit_insns()) { |
1700 | uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid); | |
1701 | uasm_i_nop(p); | |
1702 | } else { | |
bf28607f DD |
1703 | int t = scratch >= 0 ? scratch : pte; |
1704 | uasm_i_andi(p, t, pte, _PAGE_WRITE); | |
1705 | uasm_il_beqz(p, r, t, lid); | |
1706 | if (pte == t) | |
1707 | /* You lose the SMP race :-( */ |
1708 | iPTE_LW(p, pte, ptr); | |
cc33ae43 | 1709 | } |
1da177e4 LT |
1710 | } |
1711 | ||
82622284 | 1712 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
3d8bfdd0 DD |
1713 | |
1714 | ||
1da177e4 LT |
1715 | /* |
1716 | * R3000 style TLB load/store/modify handlers. | |
1717 | */ | |
1718 | ||
fded2e50 MR |
1719 | /* |
1720 | * This places the pte into ENTRYLO0 and writes it with tlbwi. | |
1721 | * Then it returns. | |
1722 | */ | |
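/*
 * The rfe in the jr delay slot pops the KU/IE stack in c0_status,
 * restoring the pre-exception mode as control returns.
 */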
234fcd14 | 1723 | static void __cpuinit |
fded2e50 | 1724 | build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) |
1da177e4 | 1725 | { |
e30ec452 TS |
1726 | uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ |
1727 | uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */ | |
1728 | uasm_i_tlbwi(p); | |
1729 | uasm_i_jr(p, tmp); | |
1730 | uasm_i_rfe(p); /* branch delay */ | |
1da177e4 LT |
1731 | } |
1732 | ||
1733 | /* | |
fded2e50 MR |
1734 | * This places the pte into ENTRYLO0 and writes it with tlbwi |
1735 | * or tlbwr as appropriate. This is because the index register | |
1736 | * may have the probe fail bit set as a result of a trap on a | |
1737 | * kseg2 access, i.e. without refill. Then it returns. | |
1da177e4 | 1738 | */ |
234fcd14 | 1739 | static void __cpuinit |
e30ec452 TS |
1740 | build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, |
1741 | struct uasm_reloc **r, unsigned int pte, | |
1742 | unsigned int tmp) | |
1743 | { | |
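/* A failed tlbp sets the P bit (bit 31) of c0_index, hence the bltz test below. */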
1744 | uasm_i_mfc0(p, tmp, C0_INDEX); | |
1745 | uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ | |
1746 | uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */ | |
1747 | uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */ | |
1748 | uasm_i_tlbwi(p); /* cp0 delay */ | |
1749 | uasm_i_jr(p, tmp); | |
1750 | uasm_i_rfe(p); /* branch delay */ | |
1751 | uasm_l_r3000_write_probe_fail(l, *p); | |
1752 | uasm_i_tlbwr(p); /* cp0 delay */ | |
1753 | uasm_i_jr(p, tmp); | |
1754 | uasm_i_rfe(p); /* branch delay */ | |
1da177e4 LT |
1755 | } |
1756 | ||
234fcd14 | 1757 | static void __cpuinit |
1da177e4 LT |
1758 | build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, |
1759 | unsigned int ptr) | |
1760 | { | |
1761 | long pgdc = (long)pgd_current; | |
1762 | ||
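/*
 * BadVAddr >> 22 << 2 is the byte offset of the pgd entry (each entry
 * maps 4MB); c0_context holds the faulting VPN pre-shifted so that
 * the andi 0xffc below yields the byte offset of the pte within the
 * page table.
 */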
e30ec452 TS |
1763 | uasm_i_mfc0(p, pte, C0_BADVADDR); |
1764 | uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */ | |
1765 | uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); | |
1766 | uasm_i_srl(p, pte, pte, 22); /* load delay */ | |
1767 | uasm_i_sll(p, pte, pte, 2); | |
1768 | uasm_i_addu(p, ptr, ptr, pte); | |
1769 | uasm_i_mfc0(p, pte, C0_CONTEXT); | |
1770 | uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */ | |
1771 | uasm_i_andi(p, pte, pte, 0xffc); /* load delay */ | |
1772 | uasm_i_addu(p, ptr, ptr, pte); | |
1773 | uasm_i_lw(p, pte, 0, ptr); | |
1774 | uasm_i_tlbp(p); /* load delay */ | |
1da177e4 LT |
1775 | } |
1776 | ||
234fcd14 | 1777 | static void __cpuinit build_r3000_tlb_load_handler(void) |
1da177e4 LT |
1778 | { |
1779 | u32 *p = handle_tlbl; | |
e30ec452 TS |
1780 | struct uasm_label *l = labels; |
1781 | struct uasm_reloc *r = relocs; | |
1da177e4 LT |
1782 | |
1783 | memset(handle_tlbl, 0, sizeof(handle_tlbl)); | |
1784 | memset(labels, 0, sizeof(labels)); | |
1785 | memset(relocs, 0, sizeof(relocs)); | |
1786 | ||
1787 | build_r3000_tlbchange_handler_head(&p, K0, K1); | |
bf28607f | 1788 | build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl); |
e30ec452 | 1789 | uasm_i_nop(&p); /* load delay */ |
1da177e4 | 1790 | build_make_valid(&p, &r, K0, K1); |
fded2e50 | 1791 | build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); |
1da177e4 | 1792 | |
e30ec452 TS |
1793 | uasm_l_nopage_tlbl(&l, p); |
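/* j takes a 28-bit offset within the current 256MB segment, hence the mask. */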
1794 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); | |
1795 | uasm_i_nop(&p); | |
1da177e4 LT |
1796 | |
1797 | if ((p - handle_tlbl) > FASTPATH_SIZE) | |
1798 | panic("TLB load handler fastpath space exceeded"); | |
1799 | ||
e30ec452 TS |
1800 | uasm_resolve_relocs(relocs, labels); |
1801 | pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", | |
1802 | (unsigned int)(p - handle_tlbl)); | |
1da177e4 | 1803 | |
a2c763e0 | 1804 | dump_handler("r3000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl)); |
1da177e4 LT |
1805 | } |
1806 | ||
234fcd14 | 1807 | static void __cpuinit build_r3000_tlb_store_handler(void) |
1da177e4 LT |
1808 | { |
1809 | u32 *p = handle_tlbs; | |
e30ec452 TS |
1810 | struct uasm_label *l = labels; |
1811 | struct uasm_reloc *r = relocs; | |
1da177e4 LT |
1812 | |
1813 | memset(handle_tlbs, 0, sizeof(handle_tlbs)); | |
1814 | memset(labels, 0, sizeof(labels)); | |
1815 | memset(relocs, 0, sizeof(relocs)); | |
1816 | ||
1817 | build_r3000_tlbchange_handler_head(&p, K0, K1); | |
bf28607f | 1818 | build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs); |
e30ec452 | 1819 | uasm_i_nop(&p); /* load delay */ |
1da177e4 | 1820 | build_make_write(&p, &r, K0, K1); |
fded2e50 | 1821 | build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); |
1da177e4 | 1822 | |
e30ec452 TS |
1823 | uasm_l_nopage_tlbs(&l, p); |
1824 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); | |
1825 | uasm_i_nop(&p); | |
1da177e4 LT |
1826 | |
1827 | if ((p - handle_tlbs) > FASTPATH_SIZE) | |
1828 | panic("TLB store handler fastpath space exceeded"); | |
1829 | ||
e30ec452 TS |
1830 | uasm_resolve_relocs(relocs, labels); |
1831 | pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", | |
1832 | (unsigned int)(p - handle_tlbs)); | |
1da177e4 | 1833 | |
a2c763e0 | 1834 | dump_handler("r3000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs)); |
1da177e4 LT |
1835 | } |
1836 | ||
234fcd14 | 1837 | static void __cpuinit build_r3000_tlb_modify_handler(void) |
1da177e4 LT |
1838 | { |
1839 | u32 *p = handle_tlbm; | |
e30ec452 TS |
1840 | struct uasm_label *l = labels; |
1841 | struct uasm_reloc *r = relocs; | |
1da177e4 LT |
1842 | |
1843 | memset(handle_tlbm, 0, sizeof(handle_tlbm)); | |
1844 | memset(labels, 0, sizeof(labels)); | |
1845 | memset(relocs, 0, sizeof(relocs)); | |
1846 | ||
1847 | build_r3000_tlbchange_handler_head(&p, K0, K1); | |
d954ffe3 | 1848 | build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm); |
e30ec452 | 1849 | uasm_i_nop(&p); /* load delay */ |
1da177e4 | 1850 | build_make_write(&p, &r, K0, K1); |
fded2e50 | 1851 | build_r3000_pte_reload_tlbwi(&p, K0, K1); |
1da177e4 | 1852 | |
e30ec452 TS |
1853 | uasm_l_nopage_tlbm(&l, p); |
1854 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); | |
1855 | uasm_i_nop(&p); | |
1da177e4 LT |
1856 | |
1857 | if ((p - handle_tlbm) > FASTPATH_SIZE) | |
1858 | panic("TLB modify handler fastpath space exceeded"); | |
1859 | ||
e30ec452 TS |
1860 | uasm_resolve_relocs(relocs, labels); |
1861 | pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", | |
1862 | (unsigned int)(p - handle_tlbm)); | |
1da177e4 | 1863 | |
a2c763e0 | 1864 | dump_handler("r3000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm)); |
1da177e4 | 1865 | } |
82622284 | 1866 | #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ |
1da177e4 LT |
1867 | |
1868 | /* | |
1869 | * R4000 style TLB load/store/modify handlers. | |
1870 | */ | |
bf28607f | 1871 | static struct work_registers __cpuinit |
e30ec452 | 1872 | build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, |
bf28607f | 1873 | struct uasm_reloc **r) |
1da177e4 | 1874 | { |
bf28607f DD |
1875 | struct work_registers wr = build_get_work_registers(p); |
1876 | ||
875d43e7 | 1877 | #ifdef CONFIG_64BIT |
bf28607f | 1878 | build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */ |
1da177e4 | 1879 | #else |
bf28607f | 1880 | build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */ |
1da177e4 LT |
1881 | #endif |
1882 | ||
aa1762f4 | 1883 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT |
fd062c84 DD |
1884 | /* |
1885 | * For huge tlb entries, the pmd doesn't contain an address but |
1886 | * instead contains the tlb pte. Check the _PAGE_HUGE bit to |
1887 | * see if we need to jump to huge tlb processing. |
1888 | */ | |
bf28607f | 1889 | build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update); |
fd062c84 DD |
1890 | #endif |
1891 | ||
bf28607f DD |
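/*
 * Compute the pte offset: BadVAddr is shifted down so that the page
 * index is scaled by sizeof(pte_t), masked to the table, and added
 * to the page table pointer just loaded from the pmd.
 */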
1892 | UASM_i_MFC0(p, wr.r1, C0_BADVADDR); |
1893 | UASM_i_LW(p, wr.r2, 0, wr.r2); | |
1894 | UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); | |
1895 | uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2); | |
1896 | UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1); | |
1da177e4 LT |
1897 | |
1898 | #ifdef CONFIG_SMP | |
e30ec452 TS |
1899 | uasm_l_smp_pgtable_change(l, *p); |
1900 | #endif | |
bf28607f | 1901 | iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ |
8df5beac MR |
1902 | if (!m4kc_tlbp_war()) |
1903 | build_tlb_probe_entry(p); | |
bf28607f | 1904 | return wr; |
1da177e4 LT |
1905 | } |
1906 | ||
234fcd14 | 1907 | static void __cpuinit |
e30ec452 TS |
1908 | build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, |
1909 | struct uasm_reloc **r, unsigned int tmp, | |
1da177e4 LT |
1910 | unsigned int ptr) |
1911 | { | |
e30ec452 TS |
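/*
 * ori then xori with sizeof(pte_t) unconditionally clears that bit,
 * rounding ptr down to the even pte of the pair that
 * build_update_entries expects.
 */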
1912 | uasm_i_ori(p, ptr, ptr, sizeof(pte_t)); |
1913 | uasm_i_xori(p, ptr, ptr, sizeof(pte_t)); | |
1da177e4 LT |
1914 | build_update_entries(p, tmp, ptr); |
1915 | build_tlb_write_entry(p, l, r, tlb_indexed); | |
e30ec452 | 1916 | uasm_l_leave(l, *p); |
bf28607f | 1917 | build_restore_work_registers(p); |
e30ec452 | 1918 | uasm_i_eret(p); /* return from trap */ |
1da177e4 | 1919 | |
875d43e7 | 1920 | #ifdef CONFIG_64BIT |
1ec56329 | 1921 | build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill); |
1da177e4 LT |
1922 | #endif |
1923 | } | |
1924 | ||
234fcd14 | 1925 | static void __cpuinit build_r4000_tlb_load_handler(void) |
1da177e4 LT |
1926 | { |
1927 | u32 *p = handle_tlbl; | |
e30ec452 TS |
1928 | struct uasm_label *l = labels; |
1929 | struct uasm_reloc *r = relocs; | |
bf28607f | 1930 | struct work_registers wr; |
1da177e4 LT |
1931 | |
1932 | memset(handle_tlbl, 0, sizeof(handle_tlbl)); | |
1933 | memset(labels, 0, sizeof(labels)); | |
1934 | memset(relocs, 0, sizeof(relocs)); | |
1935 | ||
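/*
 * BCM1250 M3 erratum workaround: on a spurious TLB exception the VPN
 * in c0_entryhi disagrees with c0_badvaddr; in that case just leave
 * through the normal exit path.
 */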
1936 | if (bcm1250_m3_war()) { | |
3d45285d RB |
1937 | unsigned int segbits = 44; |
1938 | ||
1939 | uasm_i_dmfc0(&p, K0, C0_BADVADDR); | |
1940 | uasm_i_dmfc0(&p, K1, C0_ENTRYHI); | |
e30ec452 | 1941 | uasm_i_xor(&p, K0, K0, K1); |
3be6022c DD |
1942 | uasm_i_dsrl_safe(&p, K1, K0, 62); |
1943 | uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); | |
1944 | uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); | |
3d45285d | 1945 | uasm_i_or(&p, K0, K0, K1); |
e30ec452 TS |
1946 | uasm_il_bnez(&p, &r, K0, label_leave); |
1947 | /* No need for uasm_i_nop */ | |
1da177e4 LT |
1948 | } |
1949 | ||
bf28607f DD |
1950 | wr = build_r4000_tlbchange_handler_head(&p, &l, &r); |
1951 | build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl); | |
8df5beac MR |
1952 | if (m4kc_tlbp_war()) |
1953 | build_tlb_probe_entry(&p); | |
6dd9344c | 1954 | |
05857c64 | 1955 | if (cpu_has_rixi) { |
6dd9344c DD |
1956 | /* |
1957 | * If the page is not _PAGE_VALID, RI or XI could not | |
1958 | * have triggered it. Skip the expensive test. |
1959 | */ | |
cc33ae43 | 1960 | if (use_bbit_insns()) { |
bf28607f | 1961 | uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID), |
cc33ae43 DD |
1962 | label_tlbl_goaround1); |
1963 | } else { | |
bf28607f DD |
1964 | uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID); |
1965 | uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1); | |
cc33ae43 | 1966 | } |
6dd9344c DD |
1967 | uasm_i_nop(&p); |
1968 | ||
1969 | uasm_i_tlbr(&p); | |
1970 | /* Examine entrylo 0 or 1 based on ptr. */ | |
cc33ae43 | 1971 | if (use_bbit_insns()) { |
bf28607f | 1972 | uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); |
cc33ae43 | 1973 | } else { |
bf28607f DD |
1974 | uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t)); |
1975 | uasm_i_beqz(&p, wr.r3, 8); | |
cc33ae43 | 1976 | } |
bf28607f DD |
1977 | /* load it in the delay slot */ |
1978 | UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0); | |
1979 | /* load it if ptr is odd */ | |
1980 | UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1); | |
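/*
 * The branch offset of 8 above (two instruction slots) skips the
 * ENTRYLO1 read: ENTRYLO0 is read in the delay slot either way and
 * is simply overwritten when the pte is odd.
 */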
6dd9344c | 1981 | /* |
bf28607f | 1982 | * If the entryLo (now in wr.r3) is valid (bit 1), RI or |
6dd9344c DD |
1983 | * XI must have triggered it. |
1984 | */ | |
cc33ae43 | 1985 | if (use_bbit_insns()) { |
bf28607f DD |
1986 | uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl); |
1987 | uasm_i_nop(&p); |
1989 | } else { | |
bf28607f DD |
1990 | uasm_i_andi(&p, wr.r3, wr.r3, 2); |
1991 | uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl); | |
1992 | uasm_i_nop(&p); | |
cc33ae43 | 1993 | } |
bf28607f | 1994 | uasm_l_tlbl_goaround1(&l, p); |
6dd9344c | 1995 | } |
bf28607f DD |
1996 | build_make_valid(&p, &r, wr.r1, wr.r2); |
1997 | build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); | |
1da177e4 | 1998 | |
aa1762f4 | 1999 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT |
fd062c84 DD |
2000 | /* |
2001 | * This is the entry point when build_r4000_tlbchange_handler_head | |
2002 | * spots a huge page. | |
2003 | */ | |
2004 | uasm_l_tlb_huge_update(&l, p); | |
bf28607f DD |
2005 | iPTE_LW(&p, wr.r1, wr.r2); |
2006 | build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl); | |
fd062c84 | 2007 | build_tlb_probe_entry(&p); |
6dd9344c | 2008 | |
05857c64 | 2009 | if (cpu_has_rixi) { |
6dd9344c DD |
2010 | /* |
2011 | * If the page is not _PAGE_VALID, RI or XI could not | |
2012 | * have triggered it. Skip the expensive test. |
2013 | */ | |
cc33ae43 | 2014 | if (use_bbit_insns()) { |
bf28607f | 2015 | uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID), |
cc33ae43 DD |
2016 | label_tlbl_goaround2); |
2017 | } else { | |
bf28607f DD |
2018 | uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID); |
2019 | uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); | |
cc33ae43 | 2020 | } |
6dd9344c DD |
2021 | uasm_i_nop(&p); |
2022 | ||
2023 | uasm_i_tlbr(&p); | |
2024 | /* Examine entrylo 0 or 1 based on ptr. */ | |
cc33ae43 | 2025 | if (use_bbit_insns()) { |
bf28607f | 2026 | uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); |
cc33ae43 | 2027 | } else { |
bf28607f DD |
2028 | uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t)); |
2029 | uasm_i_beqz(&p, wr.r3, 8); | |
cc33ae43 | 2030 | } |
bf28607f DD |
2031 | /* load it in the delay slot */ |
2032 | UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0); | |
2033 | /* load it if ptr is odd */ | |
2034 | UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1); | |
6dd9344c | 2035 | /* |
bf28607f | 2036 | * If the entryLo (now in wr.r3) is valid (bit 1), RI or |
6dd9344c DD |
2037 | * XI must have triggered it. |
2038 | */ | |
cc33ae43 | 2039 | if (use_bbit_insns()) { |
bf28607f | 2040 | uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2); |
cc33ae43 | 2041 | } else { |
bf28607f DD |
2042 | uasm_i_andi(&p, wr.r3, wr.r3, 2); |
2043 | uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); | |
cc33ae43 | 2044 | } |
0f4ccbc8 DD |
2045 | if (PM_DEFAULT_MASK == 0) |
2046 | uasm_i_nop(&p); | |
6dd9344c DD |
2047 | /* |
2048 | * We clobbered C0_PAGEMASK, restore it. On the other branch | |
2049 | * it is restored in build_huge_tlb_write_entry. | |
2050 | */ | |
bf28607f | 2051 | build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0); |
6dd9344c DD |
2052 | |
2053 | uasm_l_tlbl_goaround2(&l, p); | |
2054 | } | |
bf28607f DD |
2055 | uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID)); |
2056 | build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); | |
fd062c84 DD |
2057 | #endif |
2058 | ||
e30ec452 | 2059 | uasm_l_nopage_tlbl(&l, p); |
bf28607f | 2060 | build_restore_work_registers(&p); |
e30ec452 TS |
2061 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); |
2062 | uasm_i_nop(&p); | |
1da177e4 LT |
2063 | |
2064 | if ((p - handle_tlbl) > FASTPATH_SIZE) | |
2065 | panic("TLB load handler fastpath space exceeded"); | |
2066 | ||
e30ec452 TS |
2067 | uasm_resolve_relocs(relocs, labels); |
2068 | pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", | |
2069 | (unsigned int)(p - handle_tlbl)); | |
1da177e4 | 2070 | |
a2c763e0 | 2071 | dump_handler("r4000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl)); |
1da177e4 LT |
2072 | } |
2073 | ||
234fcd14 | 2074 | static void __cpuinit build_r4000_tlb_store_handler(void) |
1da177e4 LT |
2075 | { |
2076 | u32 *p = handle_tlbs; | |
e30ec452 TS |
2077 | struct uasm_label *l = labels; |
2078 | struct uasm_reloc *r = relocs; | |
bf28607f | 2079 | struct work_registers wr; |
1da177e4 LT |
2080 | |
2081 | memset(handle_tlbs, 0, sizeof(handle_tlbs)); | |
2082 | memset(labels, 0, sizeof(labels)); | |
2083 | memset(relocs, 0, sizeof(relocs)); | |
2084 | ||
bf28607f DD |
2085 | wr = build_r4000_tlbchange_handler_head(&p, &l, &r); |
2086 | build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs); | |
8df5beac MR |
2087 | if (m4kc_tlbp_war()) |
2088 | build_tlb_probe_entry(&p); | |
bf28607f DD |
2089 | build_make_write(&p, &r, wr.r1, wr.r2); |
2090 | build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); | |
1da177e4 | 2091 | |
aa1762f4 | 2092 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT |
fd062c84 DD |
2093 | /* |
2094 | * This is the entry point when | |
2095 | * build_r4000_tlbchange_handler_head spots a huge page. | |
2096 | */ | |
2097 | uasm_l_tlb_huge_update(&l, p); | |
bf28607f DD |
2098 | iPTE_LW(&p, wr.r1, wr.r2); |
2099 | build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs); | |
fd062c84 | 2100 | build_tlb_probe_entry(&p); |
bf28607f | 2101 | uasm_i_ori(&p, wr.r1, wr.r1, |
fd062c84 | 2102 | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); |
bf28607f | 2103 | build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); |
fd062c84 DD |
2104 | #endif |
2105 | ||
e30ec452 | 2106 | uasm_l_nopage_tlbs(&l, p); |
bf28607f | 2107 | build_restore_work_registers(&p); |
e30ec452 TS |
2108 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); |
2109 | uasm_i_nop(&p); | |
1da177e4 LT |
2110 | |
2111 | if ((p - handle_tlbs) > FASTPATH_SIZE) | |
2112 | panic("TLB store handler fastpath space exceeded"); | |
2113 | ||
e30ec452 TS |
2114 | uasm_resolve_relocs(relocs, labels); |
2115 | pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", | |
2116 | (unsigned int)(p - handle_tlbs)); | |
1da177e4 | 2117 | |
a2c763e0 | 2118 | dump_handler("r4000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs)); |
1da177e4 LT |
2119 | } |
2120 | ||
234fcd14 | 2121 | static void __cpuinit build_r4000_tlb_modify_handler(void) |
1da177e4 LT |
2122 | { |
2123 | u32 *p = handle_tlbm; | |
e30ec452 TS |
2124 | struct uasm_label *l = labels; |
2125 | struct uasm_reloc *r = relocs; | |
bf28607f | 2126 | struct work_registers wr; |
1da177e4 LT |
2127 | |
2128 | memset(handle_tlbm, 0, sizeof(handle_tlbm)); | |
2129 | memset(labels, 0, sizeof(labels)); | |
2130 | memset(relocs, 0, sizeof(relocs)); | |
2131 | ||
bf28607f DD |
2132 | wr = build_r4000_tlbchange_handler_head(&p, &l, &r); |
2133 | build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); | |
8df5beac MR |
2134 | if (m4kc_tlbp_war()) |
2135 | build_tlb_probe_entry(&p); | |
1da177e4 | 2136 | /* Present and writable bits set, set accessed and dirty bits. */ |
bf28607f DD |
2137 | build_make_write(&p, &r, wr.r1, wr.r2); |
2138 | build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); | |
1da177e4 | 2139 | |
aa1762f4 | 2140 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT |
fd062c84 DD |
2141 | /* |
2142 | * This is the entry point when | |
2143 | * build_r4000_tlbchange_handler_head spots a huge page. | |
2144 | */ | |
2145 | uasm_l_tlb_huge_update(&l, p); | |
bf28607f DD |
2146 | iPTE_LW(&p, wr.r1, wr.r2); |
2147 | build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); | |
fd062c84 | 2148 | build_tlb_probe_entry(&p); |
bf28607f | 2149 | uasm_i_ori(&p, wr.r1, wr.r1, |
fd062c84 | 2150 | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); |
bf28607f | 2151 | build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); |
fd062c84 DD |
2152 | #endif |
2153 | ||
e30ec452 | 2154 | uasm_l_nopage_tlbm(&l, p); |
bf28607f | 2155 | build_restore_work_registers(&p); |
e30ec452 TS |
2156 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); |
2157 | uasm_i_nop(&p); | |
1da177e4 LT |
2158 | |
2159 | if ((p - handle_tlbm) > FASTPATH_SIZE) | |
2160 | panic("TLB modify handler fastpath space exceeded"); | |
2161 | ||
e30ec452 TS |
2162 | uasm_resolve_relocs(relocs, labels); |
2163 | pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", | |
2164 | (unsigned int)(p - handle_tlbm)); | |
115f2a44 | 2165 | |
a2c763e0 | 2166 | dump_handler("r4000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm)); |
1da177e4 LT |
2167 | } |
2168 | ||
234fcd14 | 2169 | void __cpuinit build_tlb_refill_handler(void) |
1da177e4 LT |
2170 | { |
2171 | /* | |
2172 | * The refill handler is generated per-CPU; multi-node systems |
2173 | * may have local storage for it. The other handlers are only | |
2174 | * needed once. | |
2175 | */ | |
2176 | static int run_once = 0; | |
2177 | ||
a2c763e0 RB |
2178 | output_pgtable_bits_defines(); |
2179 | ||
1ec56329 DD |
2180 | #ifdef CONFIG_64BIT |
2181 | check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); | |
2182 | #endif | |
2183 | ||
10cc3529 | 2184 | switch (current_cpu_type()) { |
1da177e4 LT |
2185 | case CPU_R2000: |
2186 | case CPU_R3000: | |
2187 | case CPU_R3000A: | |
2188 | case CPU_R3081E: | |
2189 | case CPU_TX3912: | |
2190 | case CPU_TX3922: | |
2191 | case CPU_TX3927: | |
82622284 | 2192 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
1da177e4 LT |
2193 | build_r3000_tlb_refill_handler(); |
2194 | if (!run_once) { | |
2195 | build_r3000_tlb_load_handler(); | |
2196 | build_r3000_tlb_store_handler(); | |
2197 | build_r3000_tlb_modify_handler(); | |
2198 | run_once++; | |
2199 | } | |
82622284 DD |
2200 | #else |
2201 | panic("No R3000 TLB refill handler"); | |
2202 | #endif | |
1da177e4 LT |
2203 | break; |
2204 | ||
2205 | case CPU_R6000: | |
2206 | case CPU_R6000A: | |
2207 | panic("No R6000 TLB refill handler yet"); | |
2208 | break; | |
2209 | ||
2210 | case CPU_R8000: | |
2211 | panic("No R8000 TLB refill handler yet"); | |
2212 | break; | |
2213 | ||
2214 | default: | |
1da177e4 | 2215 | if (!run_once) { |
bf28607f | 2216 | scratch_reg = allocate_kscratch(); |
3d8bfdd0 DD |
2217 | #ifdef CONFIG_MIPS_PGD_C0_CONTEXT |
2218 | build_r4000_setup_pgd(); | |
2219 | #endif | |
1da177e4 LT |
2220 | build_r4000_tlb_load_handler(); |
2221 | build_r4000_tlb_store_handler(); | |
2222 | build_r4000_tlb_modify_handler(); | |
2223 | run_once++; | |
2224 | } | |
3d8bfdd0 | 2225 | build_r4000_tlb_refill_handler(); |
1da177e4 LT |
2226 | } |
2227 | } | |
1d40cfcd | 2228 | |
234fcd14 | 2229 | void __cpuinit flush_tlb_handlers(void) |
1d40cfcd | 2230 | { |
e0cee3ee | 2231 | local_flush_icache_range((unsigned long)handle_tlbl, |
1d40cfcd | 2232 | (unsigned long)handle_tlbl + sizeof(handle_tlbl)); |
e0cee3ee | 2233 | local_flush_icache_range((unsigned long)handle_tlbs, |
1d40cfcd | 2234 | (unsigned long)handle_tlbs + sizeof(handle_tlbs)); |
e0cee3ee | 2235 | local_flush_icache_range((unsigned long)handle_tlbm, |
1d40cfcd | 2236 | (unsigned long)handle_tlbm + sizeof(handle_tlbm)); |
3d8bfdd0 DD |
2237 | #ifdef CONFIG_MIPS_PGD_C0_CONTEXT |
2238 | local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd, | |
2239 | (unsigned long)tlbmiss_handler_setup_pgd + sizeof(tlbmiss_handler_setup_pgd)); |
2240 | #endif | |
1d40cfcd | 2241 | } |