Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle | |
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | |
619b6e18 | 8 | * Copyright (C) 2002, 2007 Maciej W. Rozycki |
2a0b24f5 | 9 | * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved. |
1da177e4 | 10 | */ |
1da177e4 LT |
11 | #include <linux/init.h> |
12 | ||
13 | #include <asm/asm.h> | |
41c594ab | 14 | #include <asm/asmmacro.h> |
1da177e4 | 15 | #include <asm/cacheops.h> |
192ef366 | 16 | #include <asm/irqflags.h> |
1da177e4 LT |
17 | #include <asm/regdef.h> |
18 | #include <asm/fpregdef.h> | |
19 | #include <asm/mipsregs.h> | |
20 | #include <asm/stackframe.h> | |
21 | #include <asm/war.h> | |
c65a5480 | 22 | #include <asm/thread_info.h> |
1da177e4 | 23 | |
/*
 * PANIC_PIC(msg) - position-independent panic trampoline for SMTC kernels.
 * Loads the message address PIC-style, tail-calls panic() through AT, and
 * parks in an infinite loop (9: b 9b) should panic() ever return.
 * TEXT(msg) emits the message string with the local label 8 that the
 * "PTR_LA a0, 8f" forward reference resolves to.
 */
#ifdef CONFIG_MIPS_MT_SMTC
#define PANIC_PIC(msg)					\
		.set	push;				\
		.set	nomicromips;			\
		.set	reorder;			\
		PTR_LA	a0,8f;				\
		.set	noat;				\
		PTR_LA	AT, panic;			\
		jr	AT;				\
9:		b	9b;				\
		.set	pop;				\
		TEXT(msg)
#endif
1da177e4 LT |
37 | |
38 | __INIT | |
39 | ||
1da177e4 LT |
40 | /* |
41 | * General exception vector for all other CPUs. | |
42 | * | |
43 | * Be careful when changing this, it has to be at most 128 bytes | |
44 | * to fit into space reserved for the exception handler. | |
45 | */ | |
46 | NESTED(except_vec3_generic, 0, sp) | |
47 | .set push | |
48 | .set noat | |
49 | #if R5432_CP0_INTERRUPT_WAR | |
50 | mfc0 k0, CP0_INDEX | |
51 | #endif | |
52 | mfc0 k1, CP0_CAUSE | |
53 | andi k1, k1, 0x7c | |
875d43e7 | 54 | #ifdef CONFIG_64BIT |
1da177e4 LT |
55 | dsll k1, k1, 1 |
56 | #endif | |
57 | PTR_L k0, exception_handlers(k1) | |
58 | jr k0 | |
59 | .set pop | |
60 | END(except_vec3_generic) | |
61 | ||
62 | /* | |
63 | * General exception handler for CPUs with virtual coherency exception. | |
64 | * | |
65 | * Be careful when changing this, it has to be at most 256 (as a special | |
66 | * exception) bytes to fit into space reserved for the exception handler. | |
67 | */ | |
68 | NESTED(except_vec3_r4000, 0, sp) | |
69 | .set push | |
a809d460 | 70 | .set arch=r4000 |
1da177e4 LT |
71 | .set noat |
72 | mfc0 k1, CP0_CAUSE | |
73 | li k0, 31<<2 | |
74 | andi k1, k1, 0x7c | |
75 | .set push | |
76 | .set noreorder | |
77 | .set nomacro | |
78 | beq k1, k0, handle_vced | |
79 | li k0, 14<<2 | |
80 | beq k1, k0, handle_vcei | |
875d43e7 | 81 | #ifdef CONFIG_64BIT |
69903d65 | 82 | dsll k1, k1, 1 |
1da177e4 LT |
83 | #endif |
84 | .set pop | |
85 | PTR_L k0, exception_handlers(k1) | |
86 | jr k0 | |
87 | ||
88 | /* | |
89 | * Big shit, we now may have two dirty primary cache lines for the same | |
69903d65 | 90 | * physical address. We can safely invalidate the line pointed to by |
1da177e4 LT |
91 | * c0_badvaddr because after return from this exception handler the |
92 | * load / store will be re-executed. | |
93 | */ | |
94 | handle_vced: | |
69903d65 | 95 | MFC0 k0, CP0_BADVADDR |
1da177e4 LT |
96 | li k1, -4 # Is this ... |
97 | and k0, k1 # ... really needed? | |
98 | mtc0 zero, CP0_TAGLO | |
69903d65 TS |
99 | cache Index_Store_Tag_D, (k0) |
100 | cache Hit_Writeback_Inv_SD, (k0) | |
1da177e4 LT |
101 | #ifdef CONFIG_PROC_FS |
102 | PTR_LA k0, vced_count | |
103 | lw k1, (k0) | |
104 | addiu k1, 1 | |
105 | sw k1, (k0) | |
106 | #endif | |
107 | eret | |
108 | ||
109 | handle_vcei: | |
110 | MFC0 k0, CP0_BADVADDR | |
111 | cache Hit_Writeback_Inv_SD, (k0) # also cleans pi | |
112 | #ifdef CONFIG_PROC_FS | |
113 | PTR_LA k0, vcei_count | |
114 | lw k1, (k0) | |
115 | addiu k1, 1 | |
116 | sw k1, (k0) | |
117 | #endif | |
118 | eret | |
119 | .set pop | |
120 | END(except_vec3_r4000) | |
121 | ||
e4ac58af RB |
122 | __FINIT |
123 | ||
c65a5480 | 124 | .align 5 /* 32 byte rollback region */ |
087d990b | 125 | LEAF(__r4k_wait) |
c65a5480 AN |
126 | .set push |
127 | .set noreorder | |
128 | /* start of rollback region */ | |
129 | LONG_L t0, TI_FLAGS($28) | |
130 | nop | |
131 | andi t0, _TIF_NEED_RESCHED | |
132 | bnez t0, 1f | |
133 | nop | |
134 | nop | |
135 | nop | |
2a0b24f5 SH |
136 | #ifdef CONFIG_CPU_MICROMIPS |
137 | nop | |
138 | nop | |
139 | nop | |
140 | nop | |
141 | #endif | |
a809d460 | 142 | .set arch=r4000 |
c65a5480 AN |
143 | wait |
144 | /* end of rollback region (the region size must be power of two) */ | |
c65a5480 AN |
145 | 1: |
146 | jr ra | |
2a0b24f5 SH |
147 | nop |
148 | .set pop | |
087d990b | 149 | END(__r4k_wait) |
/*
 * If an interrupt hit while the CPU was inside __r4k_wait's rollback
 * region, rewind EPC to the region start so the wait is safely restarted
 * after the interrupt is serviced.  Falls through to the real handler.
 */
	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f	/* round EPC down to 32-byte boundary */
	bne	k0, k1, 9f	/* not inside __r4k_wait: leave EPC alone */
	MTC0	k0, CP0_EPC
9:
	.set	pop
	.endm
164 | ||
70342287 | 165 | .align 5 |
c65a5480 | 166 | BUILD_ROLLBACK_PROLOGUE handle_int |
e4ac58af | 167 | NESTED(handle_int, PT_SIZE, sp) |
fe99f1b1 CD |
168 | #ifdef CONFIG_TRACE_IRQFLAGS |
169 | /* | |
170 | * Check to see if the interrupted code has just disabled | |
171 | * interrupts and ignore this interrupt for now if so. | |
172 | * | |
173 | * local_irq_disable() disables interrupts and then calls | |
174 | * trace_hardirqs_off() to track the state. If an interrupt is taken | |
175 | * after interrupts are disabled but before the state is updated | |
176 | * it will appear to restore_all that it is incorrectly returning with | |
177 | * interrupts disabled | |
178 | */ | |
179 | .set push | |
180 | .set noat | |
181 | mfc0 k0, CP0_STATUS | |
182 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | |
183 | and k0, ST0_IEP | |
184 | bnez k0, 1f | |
185 | ||
c6563e85 | 186 | mfc0 k0, CP0_EPC |
fe99f1b1 CD |
187 | .set noreorder |
188 | j k0 | |
189 | rfe | |
190 | #else | |
191 | and k0, ST0_IE | |
192 | bnez k0, 1f | |
193 | ||
194 | eret | |
195 | #endif | |
196 | 1: | |
197 | .set pop | |
198 | #endif | |
e4ac58af RB |
199 | SAVE_ALL |
200 | CLI | |
192ef366 | 201 | TRACE_IRQS_OFF |
e4ac58af | 202 | |
937a8015 RB |
203 | LONG_L s0, TI_REGS($28) |
204 | LONG_S sp, TI_REGS($28) | |
f431baa5 | 205 | PTR_LA ra, ret_from_irq |
2a0b24f5 SH |
206 | PTR_LA v0, plat_irq_dispatch |
207 | jr v0 | |
208 | #ifdef CONFIG_CPU_MICROMIPS | |
209 | nop | |
210 | #endif | |
e4ac58af RB |
211 | END(handle_int) |
212 | ||
213 | __INIT | |
214 | ||
1da177e4 LT |
215 | /* |
216 | * Special interrupt vector for MIPS64 ISA & embedded MIPS processors. | |
217 | * This is a dedicated interrupt exception vector which reduces the | |
218 | * interrupt processing overhead. The jump instruction will be replaced | |
219 | * at the initialization time. | |
220 | * | |
221 | * Be careful when changing this, it has to be at most 128 bytes | |
222 | * to fit into space reserved for the exception handler. | |
223 | */ | |
224 | NESTED(except_vec4, 0, sp) | |
225 | 1: j 1b /* Dummy, will be replaced */ | |
226 | END(except_vec4) | |
227 | ||
228 | /* | |
229 | * EJTAG debug exception handler. | |
230 | * The EJTAG debug exception entry point is 0xbfc00480, which | |
2a0b24f5 | 231 | * normally is in the boot PROM, so the boot PROM must do an |
1da177e4 LT |
232 | * unconditional jump to this vector. |
233 | */ | |
234 | NESTED(except_vec_ejtag_debug, 0, sp) | |
235 | j ejtag_debug_handler | |
2a0b24f5 SH |
236 | #ifdef CONFIG_CPU_MICROMIPS |
237 | nop | |
238 | #endif | |
1da177e4 LT |
239 | END(except_vec_ejtag_debug) |
240 | ||
241 | __FINIT | |
242 | ||
e01402b1 RB |
243 | /* |
244 | * Vectored interrupt handler. | |
245 | * This prototype is copied to ebase + n*IntCtl.VS and patched | |
246 | * to invoke the handler | |
247 | */ | |
c65a5480 | 248 | BUILD_ROLLBACK_PROLOGUE except_vec_vi |
e01402b1 RB |
249 | NESTED(except_vec_vi, 0, sp) |
250 | SAVE_SOME | |
251 | SAVE_AT | |
252 | .set push | |
253 | .set noreorder | |
41c594ab RB |
254 | #ifdef CONFIG_MIPS_MT_SMTC |
255 | /* | |
256 | * To keep from blindly blocking *all* interrupts | |
257 | * during service by SMTC kernel, we also want to | |
258 | * pass the IM value to be cleared. | |
259 | */ | |
7df42461 | 260 | FEXPORT(except_vec_vi_mori) |
41c594ab RB |
261 | ori a0, $0, 0 |
262 | #endif /* CONFIG_MIPS_MT_SMTC */ | |
2a0b24f5 | 263 | PTR_LA v1, except_vec_vi_handler |
7df42461 | 264 | FEXPORT(except_vec_vi_lui) |
e01402b1 | 265 | lui v0, 0 /* Patched */ |
2a0b24f5 | 266 | jr v1 |
7df42461 | 267 | FEXPORT(except_vec_vi_ori) |
e01402b1 RB |
268 | ori v0, 0 /* Patched */ |
269 | .set pop | |
270 | END(except_vec_vi) | |
271 | EXPORT(except_vec_vi_end) | |
272 | ||
273 | /* | |
274 | * Common Vectored Interrupt code | |
275 | * Complete the register saves and invoke the handler which is passed in $v0 | |
276 | */ | |
277 | NESTED(except_vec_vi_handler, 0, sp) | |
278 | SAVE_TEMP | |
279 | SAVE_STATIC | |
41c594ab RB |
280 | #ifdef CONFIG_MIPS_MT_SMTC |
281 | /* | |
282 | * SMTC has an interesting problem that interrupts are level-triggered, | |
283 | * and the CLI macro will clear EXL, potentially causing a duplicate | |
284 | * interrupt service invocation. So we need to clear the associated | |
285 | * IM bit of Status prior to doing CLI, and restore it after the | |
286 | * service routine has been invoked - we must assume that the | |
287 | * service routine will have cleared the state, and any active | |
288 | * level represents a new or otherwised unserviced event... | |
289 | */ | |
290 | mfc0 t1, CP0_STATUS | |
291 | and t0, a0, t1 | |
0db34215 | 292 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP |
41c594ab | 293 | mfc0 t2, CP0_TCCONTEXT |
8531a35e KK |
294 | or t2, t0, t2 |
295 | mtc0 t2, CP0_TCCONTEXT | |
0db34215 | 296 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ |
41c594ab RB |
297 | xor t1, t1, t0 |
298 | mtc0 t1, CP0_STATUS | |
4277ff5e | 299 | _ehb |
41c594ab | 300 | #endif /* CONFIG_MIPS_MT_SMTC */ |
e01402b1 | 301 | CLI |
8c364435 RB |
302 | #ifdef CONFIG_TRACE_IRQFLAGS |
303 | move s0, v0 | |
304 | #ifdef CONFIG_MIPS_MT_SMTC | |
305 | move s1, a0 | |
306 | #endif | |
192ef366 | 307 | TRACE_IRQS_OFF |
8c364435 RB |
308 | #ifdef CONFIG_MIPS_MT_SMTC |
309 | move a0, s1 | |
310 | #endif | |
311 | move v0, s0 | |
312 | #endif | |
937a8015 RB |
313 | |
314 | LONG_L s0, TI_REGS($28) | |
315 | LONG_S sp, TI_REGS($28) | |
23126692 | 316 | PTR_LA ra, ret_from_irq |
f431baa5 | 317 | jr v0 |
e01402b1 RB |
318 | END(except_vec_vi_handler) |
319 | ||
1da177e4 LT |
320 | /* |
321 | * EJTAG debug exception handler. | |
322 | */ | |
323 | NESTED(ejtag_debug_handler, PT_SIZE, sp) | |
324 | .set push | |
325 | .set noat | |
326 | MTC0 k0, CP0_DESAVE | |
327 | mfc0 k0, CP0_DEBUG | |
328 | ||
329 | sll k0, k0, 30 # Check for SDBBP. | |
330 | bgez k0, ejtag_return | |
331 | ||
332 | PTR_LA k0, ejtag_debug_buffer | |
333 | LONG_S k1, 0(k0) | |
334 | SAVE_ALL | |
335 | move a0, sp | |
336 | jal ejtag_exception_handler | |
337 | RESTORE_ALL | |
338 | PTR_LA k0, ejtag_debug_buffer | |
339 | LONG_L k1, 0(k0) | |
340 | ||
341 | ejtag_return: | |
342 | MFC0 k0, CP0_DESAVE | |
343 | .set mips32 | |
344 | deret | |
345 | .set pop | |
346 | END(ejtag_debug_handler) | |
347 | ||
348 | /* | |
349 | * This buffer is reserved for the use of the EJTAG debug | |
350 | * handler. | |
351 | */ | |
352 | .data | |
353 | EXPORT(ejtag_debug_buffer) | |
354 | .fill LONGSIZE | |
355 | .previous | |
356 | ||
357 | __INIT | |
358 | ||
359 | /* | |
360 | * NMI debug exception handler for MIPS reference boards. | |
361 | * The NMI debug exception entry point is 0xbfc00000, which | |
362 | * normally is in the boot PROM, so the boot PROM must do a | |
363 | * unconditional jump to this vector. | |
364 | */ | |
365 | NESTED(except_vec_nmi, 0, sp) | |
366 | j nmi_handler | |
2a0b24f5 SH |
367 | #ifdef CONFIG_CPU_MICROMIPS |
368 | nop | |
369 | #endif | |
1da177e4 LT |
370 | END(except_vec_nmi) |
371 | ||
372 | __FINIT | |
373 | ||
374 | NESTED(nmi_handler, PT_SIZE, sp) | |
375 | .set push | |
376 | .set noat | |
83e4da1e LY |
377 | /* |
378 | * Clear ERL - restore segment mapping | |
379 | * Clear BEV - required for page fault exception handler to work | |
380 | */ | |
381 | mfc0 k0, CP0_STATUS | |
382 | ori k0, k0, ST0_EXL | |
383 | li k1, ~(ST0_BEV | ST0_ERL) | |
384 | and k0, k0, k1 | |
385 | mtc0 k0, CP0_STATUS | |
386 | _ehb | |
1da177e4 | 387 | SAVE_ALL |
70342287 | 388 | move a0, sp |
1da177e4 | 389 | jal nmi_exception_handler |
83e4da1e | 390 | /* nmi_exception_handler never returns */ |
1da177e4 LT |
391 | .set pop |
392 | END(nmi_handler) | |
393 | ||
394 | .macro __build_clear_none | |
395 | .endm | |
396 | ||
397 | .macro __build_clear_sti | |
192ef366 | 398 | TRACE_IRQS_ON |
1da177e4 LT |
399 | STI |
400 | .endm | |
401 | ||
402 | .macro __build_clear_cli | |
403 | CLI | |
192ef366 | 404 | TRACE_IRQS_OFF |
1da177e4 LT |
405 | .endm |
406 | ||
407 | .macro __build_clear_fpe | |
25c30003 DD |
408 | .set push |
409 | /* gas fails to assemble cfc1 for some archs (octeon).*/ \ | |
410 | .set mips1 | |
1da177e4 LT |
411 | cfc1 a1, fcr31 |
412 | li a2, ~(0x3f << 12) | |
413 | and a2, a1 | |
414 | ctc1 a2, fcr31 | |
25c30003 | 415 | .set pop |
192ef366 | 416 | TRACE_IRQS_ON |
1da177e4 LT |
417 | STI |
418 | .endm | |
419 | ||
420 | .macro __build_clear_ade | |
421 | MFC0 t0, CP0_BADVADDR | |
422 | PTR_S t0, PT_BVADDR(sp) | |
423 | KMODE | |
424 | .endm | |
425 | ||
426 | .macro __BUILD_silent exception | |
427 | .endm | |
428 | ||
429 | /* Gas tries to parse the PRINT argument as a string containing | |
430 | string escapes and emits bogus warnings if it believes to | |
431 | recognize an unknown escape code. So make the arguments | |
432 | start with an n and gas will believe \n is ok ... */ | |
70342287 | 433 | .macro __BUILD_verbose nexception |
1da177e4 | 434 | LONG_L a1, PT_EPC(sp) |
766160c2 | 435 | #ifdef CONFIG_32BIT |
1da177e4 | 436 | PRINT("Got \nexception at %08lx\012") |
42a3b4f2 | 437 | #endif |
766160c2 | 438 | #ifdef CONFIG_64BIT |
1da177e4 | 439 | PRINT("Got \nexception at %016lx\012") |
42a3b4f2 | 440 | #endif |
1da177e4 LT |
441 | .endm |
442 | ||
443 | .macro __BUILD_count exception | |
444 | LONG_L t0,exception_count_\exception | |
445 | LONG_ADDIU t0, 1 | |
446 | LONG_S t0,exception_count_\exception | |
447 | .comm exception_count\exception, 8, 8 | |
448 | .endm | |
449 | ||
/*
 * Build a complete exception handler: save state, run the selected
 * "clear" fragment, optionally print, then tail-call do_\handler with
 * ra pre-set to ret_from_exception.
 */
	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__BUILD_clear_\clear	/* gas macro names are case-insensitive;
				   matches __build_clear_\clear above */
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	PTR_LA	ra, ret_from_exception
	j	do_\handler
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER \exception \handler \clear \verbose _int
	.endm
469 | BUILD_HANDLER adel ade ade silent /* #4 */ | |
470 | BUILD_HANDLER ades ade ade silent /* #5 */ | |
471 | BUILD_HANDLER ibe be cli silent /* #6 */ | |
472 | BUILD_HANDLER dbe be cli silent /* #7 */ | |
473 | BUILD_HANDLER bp bp sti silent /* #9 */ | |
474 | BUILD_HANDLER ri ri sti silent /* #10 */ | |
475 | BUILD_HANDLER cpu cpu sti silent /* #11 */ | |
476 | BUILD_HANDLER ov ov sti silent /* #12 */ | |
477 | BUILD_HANDLER tr tr sti silent /* #13 */ | |
2bcb3fbc | 478 | BUILD_HANDLER msa_fpe msa_fpe sti silent /* #14 */ |
1da177e4 | 479 | BUILD_HANDLER fpe fpe fpe silent /* #15 */ |
75b5b5e0 | 480 | BUILD_HANDLER ftlb ftlb none silent /* #16 */ |
1db1af84 | 481 | BUILD_HANDLER msa msa sti silent /* #21 */ |
1da177e4 | 482 | BUILD_HANDLER mdmx mdmx sti silent /* #22 */ |
70342287 | 483 | #ifdef CONFIG_HARDWARE_WATCHPOINTS |
8bc6d05b DD |
484 | /* |
485 | * For watch, interrupts will be enabled after the watch | |
486 | * registers are read. | |
487 | */ | |
488 | BUILD_HANDLER watch watch cli silent /* #23 */ | |
b67b2b70 | 489 | #else |
1da177e4 | 490 | BUILD_HANDLER watch watch sti verbose /* #23 */ |
b67b2b70 | 491 | #endif |
1da177e4 | 492 | BUILD_HANDLER mcheck mcheck cli verbose /* #24 */ |
e35a5e35 | 493 | BUILD_HANDLER mt mt sti silent /* #25 */ |
e50c0a8f | 494 | BUILD_HANDLER dsp dsp sti silent /* #26 */ |
1da177e4 LT |
495 | BUILD_HANDLER reserved reserved sti verbose /* others */ |
496 | ||
5b10496b AN |
497 | .align 5 |
498 | LEAF(handle_ri_rdhwr_vivt) | |
499 | #ifdef CONFIG_MIPS_MT_SMTC | |
500 | PANIC_PIC("handle_ri_rdhwr_vivt called") | |
501 | #else | |
502 | .set push | |
503 | .set noat | |
504 | .set noreorder | |
505 | /* check if TLB contains a entry for EPC */ | |
506 | MFC0 k1, CP0_ENTRYHI | |
48c4ac97 | 507 | andi k1, 0xff /* ASID_MASK */ |
5b10496b | 508 | MFC0 k0, CP0_EPC |
70342287 RB |
509 | PTR_SRL k0, _PAGE_SHIFT + 1 |
510 | PTR_SLL k0, _PAGE_SHIFT + 1 | |
5b10496b AN |
511 | or k1, k0 |
512 | MTC0 k1, CP0_ENTRYHI | |
513 | mtc0_tlbw_hazard | |
514 | tlbp | |
515 | tlb_probe_hazard | |
516 | mfc0 k1, CP0_INDEX | |
517 | .set pop | |
518 | bltz k1, handle_ri /* slow path */ | |
519 | /* fall thru */ | |
520 | #endif | |
521 | END(handle_ri_rdhwr_vivt) | |
522 | ||
523 | LEAF(handle_ri_rdhwr) | |
524 | .set push | |
525 | .set noat | |
526 | .set noreorder | |
2a0b24f5 SH |
527 | /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */ |
528 | /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */ | |
5b10496b | 529 | MFC0 k1, CP0_EPC |
2a0b24f5 SH |
530 | #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2) |
531 | and k0, k1, 1 | |
532 | beqz k0, 1f | |
533 | xor k1, k0 | |
534 | lhu k0, (k1) | |
535 | lhu k1, 2(k1) | |
536 | ins k1, k0, 16, 16 | |
537 | lui k0, 0x007d | |
538 | b docheck | |
539 | ori k0, 0x6b3c | |
540 | 1: | |
541 | lui k0, 0x7c03 | |
542 | lw k1, (k1) | |
543 | ori k0, 0xe83b | |
544 | #else | |
545 | andi k0, k1, 1 | |
546 | bnez k0, handle_ri | |
547 | lui k0, 0x7c03 | |
548 | lw k1, (k1) | |
549 | ori k0, 0xe83b | |
550 | #endif | |
551 | .set reorder | |
552 | docheck: | |
5b10496b | 553 | bne k0, k1, handle_ri /* if not ours */ |
2a0b24f5 SH |
554 | |
555 | isrdhwr: | |
5b10496b AN |
556 | /* The insn is rdhwr. No need to check CAUSE.BD here. */ |
557 | get_saved_sp /* k1 := current_thread_info */ | |
558 | .set noreorder | |
559 | MFC0 k0, CP0_EPC | |
560 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | |
561 | ori k1, _THREAD_MASK | |
562 | xori k1, _THREAD_MASK | |
563 | LONG_L v1, TI_TP_VALUE(k1) | |
564 | LONG_ADDIU k0, 4 | |
565 | jr k0 | |
566 | rfe | |
567 | #else | |
619b6e18 | 568 | #ifndef CONFIG_CPU_DADDI_WORKAROUNDS |
5b10496b | 569 | LONG_ADDIU k0, 4 /* stall on $k0 */ |
619b6e18 MR |
570 | #else |
571 | .set at=v1 | |
572 | LONG_ADDIU k0, 4 | |
573 | .set noat | |
574 | #endif | |
5b10496b AN |
575 | MTC0 k0, CP0_EPC |
576 | /* I hope three instructions between MTC0 and ERET are enough... */ | |
577 | ori k1, _THREAD_MASK | |
578 | xori k1, _THREAD_MASK | |
579 | LONG_L v1, TI_TP_VALUE(k1) | |
a809d460 | 580 | .set arch=r4000 |
5b10496b AN |
581 | eret |
582 | .set mips0 | |
583 | #endif | |
584 | .set pop | |
585 | END(handle_ri_rdhwr) | |
586 | ||
875d43e7 | 587 | #ifdef CONFIG_64BIT |
1da177e4 LT |
588 | /* A temporary overflow handler used by check_daddi(). */ |
589 | ||
590 | __INIT | |
591 | ||
592 | BUILD_HANDLER daddi_ov daddi_ov none silent /* #12 */ | |
593 | #endif |