Commit | Line | Data |
---|---|---|
0a808a31 | 1 | /* arch/sparc/kernel/entry.S: Sparc trap low-level entry points. |
1da177e4 | 2 | * |
0a808a31 | 3 | * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net) |
1da177e4 LT | 4 | * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) |
5 | * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) | |
6 | * Copyright (C) 1996-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | |
7 | * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au) | |
8 | */ | |
9 | ||
1da177e4 LT | 10 | #include <linux/errno.h> |
11 | ||
12 | #include <asm/head.h> | |
13 | #include <asm/asi.h> | |
14 | #include <asm/smp.h> | |
15 | #include <asm/kgdb.h> | |
16 | #include <asm/contregs.h> | |
17 | #include <asm/ptrace.h> | |
47003497 | 18 | #include <asm/asm-offsets.h> |
1da177e4 LT | 19 | #include <asm/psr.h> |
20 | #include <asm/vaddrs.h> | |
21 | #include <asm/memreg.h> | |
22 | #include <asm/page.h> | |
23 | #ifdef CONFIG_SUN4 | |
24 | #include <asm/pgtsun4.h> | |
25 | #else | |
26 | #include <asm/pgtsun4c.h> | |
27 | #endif | |
28 | #include <asm/winmacro.h> | |
29 | #include <asm/signal.h> | |
30 | #include <asm/obio.h> | |
31 | #include <asm/mxcc.h> | |
32 | #include <asm/thread_info.h> | |
33 | #include <asm/param.h> | |
59359ff8 | 34 | #include <asm/unistd.h> |
1da177e4 LT | 35 | |
36 | #include <asm/asmmacro.h> | |
37 | ||
38 | #define curptr g6 | |
39 | ||
1da177e4 LT | 40 | /* These are just handy. */ |
41 | #define _SV save %sp, -STACKFRAME_SZ, %sp | |
42 | #define _RS restore | |
43 | ||
44 | #define FLUSH_ALL_KERNEL_WINDOWS \ | |
45 | _SV; _SV; _SV; _SV; _SV; _SV; _SV; \ | |
46 | _RS; _RS; _RS; _RS; _RS; _RS; _RS; | |
47 | ||
48 | /* First, KGDB low level things. This is a rewrite | |
49 | * of the routines found in the sparc-stub.c asm() statement | |
50 | * from the gdb distribution. This is also dual-purpose | |
51 | * as a software trap for userlevel programs. | |
52 | */ | |
53 | .data | |
54 | .align 4 | |
55 | ||
56 | in_trap_handler: | |
57 | .word 0 | |
58 | ||
59 | .text | |
60 | .align 4 | |
61 | ||
62 | #if 0 /* kgdb is dropped from 2.5.33 */ | |
63 | ! This function is called when any SPARC trap (except window overflow or | |
64 | ! underflow) occurs. It makes sure that the invalid register window is still | |
65 | ! available before jumping into C code. It will also restore the world if you | |
66 | ! return from handle_exception. | |
67 | ||
68 | .globl trap_low | |
69 | trap_low: | |
70 | rd %wim, %l3 | |
71 | SAVE_ALL | |
72 | ||
73 | sethi %hi(in_trap_handler), %l4 | |
74 | ld [%lo(in_trap_handler) + %l4], %l5 | |
75 | inc %l5 | |
76 | st %l5, [%lo(in_trap_handler) + %l4] | |
77 | ||
78 | /* Make sure kgdb sees the same state we just saved. */ | |
79 | LOAD_PT_GLOBALS(sp) | |
80 | LOAD_PT_INS(sp) | |
81 | ld [%sp + STACKFRAME_SZ + PT_Y], %l4 | |
82 | ld [%sp + STACKFRAME_SZ + PT_WIM], %l3 | |
83 | ld [%sp + STACKFRAME_SZ + PT_PSR], %l0 | |
84 | ld [%sp + STACKFRAME_SZ + PT_PC], %l1 | |
85 | ld [%sp + STACKFRAME_SZ + PT_NPC], %l2 | |
86 | rd %tbr, %l5 /* Never changes... */ | |
87 | ||
88 | /* Make kgdb exception frame. */ | |
89 | sub %sp,(16+1+6+1+72)*4,%sp ! Make room for input & locals | |
90 | ! + hidden arg + arg spill | |
91 | ! + doubleword alignment | |
92 | ! + registers[72] local var | |
93 | SAVE_KGDB_GLOBALS(sp) | |
94 | SAVE_KGDB_INS(sp) | |
95 | SAVE_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2) | |
96 | ||
97 | /* We are increasing PIL, so two writes. */ | |
98 | or %l0, PSR_PIL, %l0 | |
99 | wr %l0, 0, %psr | |
100 | WRITE_PAUSE | |
101 | wr %l0, PSR_ET, %psr | |
102 | WRITE_PAUSE | |
103 | ||
104 | call handle_exception | |
105 | add %sp, STACKFRAME_SZ, %o0 ! Pass address of registers | |
106 | ||
107 | /* Load new kgdb register set. */ | |
108 | LOAD_KGDB_GLOBALS(sp) | |
109 | LOAD_KGDB_INS(sp) | |
110 | LOAD_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2) | |
111 | wr %l4, 0x0, %y | |
112 | ||
113 | sethi %hi(in_trap_handler), %l4 | |
114 | ld [%lo(in_trap_handler) + %l4], %l5 | |
115 | dec %l5 | |
116 | st %l5, [%lo(in_trap_handler) + %l4] | |
117 | ||
118 | add %sp,(16+1+6+1+72)*4,%sp ! Undo the kgdb trap frame. | |
119 | ||
120 | /* Now take what kgdb did and place it into the pt_regs | |
121 | * frame which SparcLinux RESTORE_ALL understands. | |
122 | */ | |
123 | STORE_PT_INS(sp) | |
124 | STORE_PT_GLOBALS(sp) | |
125 | STORE_PT_YREG(sp, g2) | |
126 | STORE_PT_PRIV(sp, l0, l1, l2) | |
127 | ||
128 | RESTORE_ALL | |
129 | #endif | |
130 | ||
0a808a31 | 131 | #if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE) |
1da177e4 LT | 132 | .text |
133 | .align 4 | |
134 | .globl floppy_hardint | |
135 | floppy_hardint: | |
136 | /* | |
137 | * This code cannot touch registers %l0 %l1 and %l2 | |
138 | * because SAVE_ALL depends on their values. It depends | |
139 | * on %l3 also, but we regenerate it before a call. | |
140 | * Other registers are: | |
141 | * %l3 -- base address of fdc registers | |
142 | * %l4 -- pdma_vaddr | |
143 | * %l5 -- scratch for ld/st address | |
144 | * %l6 -- pdma_size | |
145 | * %l7 -- scratch [floppy byte, ld/st address, aux. data] | |
146 | */ | |
147 | ||
148 | /* Do we have work to do? */ | |
149 | sethi %hi(doing_pdma), %l7 | |
150 | ld [%l7 + %lo(doing_pdma)], %l7 | |
151 | cmp %l7, 0 | |
152 | be floppy_dosoftint | |
153 | nop | |
154 | ||
155 | /* Load fdc register base */ | |
156 | sethi %hi(fdc_status), %l3 | |
157 | ld [%l3 + %lo(fdc_status)], %l3 | |
158 | ||
159 | /* Setup register addresses */ | |
160 | sethi %hi(pdma_vaddr), %l5 ! transfer buffer | |
161 | ld [%l5 + %lo(pdma_vaddr)], %l4 | |
162 | sethi %hi(pdma_size), %l5 ! bytes to go | |
163 | ld [%l5 + %lo(pdma_size)], %l6 | |
164 | next_byte: | |
165 | ldub [%l3], %l7 | |
166 | ||
167 | andcc %l7, 0x80, %g0 ! Does fifo still have data | |
168 | bz floppy_fifo_emptied ! fifo has been emptied... | |
169 | andcc %l7, 0x20, %g0 ! in non-dma mode still? | |
170 | bz floppy_overrun ! nope, overrun | |
171 | andcc %l7, 0x40, %g0 ! 0=write 1=read | |
172 | bz floppy_write | |
173 | sub %l6, 0x1, %l6 | |
174 | ||
175 | /* Ok, actually read this byte */ | |
176 | ldub [%l3 + 1], %l7 | |
177 | orcc %g0, %l6, %g0 | |
178 | stb %l7, [%l4] | |
179 | bne next_byte | |
180 | add %l4, 0x1, %l4 | |
181 | ||
182 | b floppy_tdone | |
183 | nop | |
184 | ||
185 | floppy_write: | |
186 | /* Ok, actually write this byte */ | |
187 | ldub [%l4], %l7 | |
188 | orcc %g0, %l6, %g0 | |
189 | stb %l7, [%l3 + 1] | |
190 | bne next_byte | |
191 | add %l4, 0x1, %l4 | |
192 | ||
193 | /* fall through... */ | |
194 | floppy_tdone: | |
195 | sethi %hi(pdma_vaddr), %l5 | |
196 | st %l4, [%l5 + %lo(pdma_vaddr)] | |
197 | sethi %hi(pdma_size), %l5 | |
198 | st %l6, [%l5 + %lo(pdma_size)] | |
199 | /* Flip terminal count pin */ | |
200 | set auxio_register, %l7 | |
201 | ld [%l7], %l7 | |
202 | ||
203 | set sparc_cpu_model, %l5 | |
204 | ld [%l5], %l5 | |
205 | subcc %l5, 1, %g0 /* enum { sun4c = 1 }; */ | |
206 | be 1f | |
207 | ldub [%l7], %l5 | |
208 | ||
209 | or %l5, 0xc2, %l5 | |
210 | stb %l5, [%l7] | |
211 | andn %l5, 0x02, %l5 | |
212 | b 2f | |
213 | nop | |
214 | ||
215 | 1: | |
216 | or %l5, 0xf4, %l5 | |
217 | stb %l5, [%l7] | |
218 | andn %l5, 0x04, %l5 | |
219 | ||
220 | 2: | |
221 | /* Kill some time so the bits set */ | |
222 | WRITE_PAUSE | |
223 | WRITE_PAUSE | |
224 | ||
225 | stb %l5, [%l7] | |
226 | ||
227 | /* Prevent recursion */ | |
228 | sethi %hi(doing_pdma), %l7 | |
229 | b floppy_dosoftint | |
230 | st %g0, [%l7 + %lo(doing_pdma)] | |
231 | ||
232 | /* We emptied the FIFO, but we haven't read everything | |
233 | * as of yet. Store the current transfer address and | |
234 | * bytes left to read so we can continue when the next | |
235 | * fast IRQ comes in. | |
236 | */ | |
237 | floppy_fifo_emptied: | |
238 | sethi %hi(pdma_vaddr), %l5 | |
239 | st %l4, [%l5 + %lo(pdma_vaddr)] | |
240 | sethi %hi(pdma_size), %l7 | |
241 | st %l6, [%l7 + %lo(pdma_size)] | |
242 | ||
243 | /* Restore condition codes */ | |
244 | wr %l0, 0x0, %psr | |
245 | WRITE_PAUSE | |
246 | ||
247 | jmp %l1 | |
248 | rett %l2 | |
249 | ||
250 | floppy_overrun: | |
251 | sethi %hi(pdma_vaddr), %l5 | |
252 | st %l4, [%l5 + %lo(pdma_vaddr)] | |
253 | sethi %hi(pdma_size), %l5 | |
254 | st %l6, [%l5 + %lo(pdma_size)] | |
255 | /* Prevent recursion */ | |
256 | sethi %hi(doing_pdma), %l7 | |
257 | st %g0, [%l7 + %lo(doing_pdma)] | |
258 | ||
259 | /* fall through... */ | |
260 | floppy_dosoftint: | |
261 | rd %wim, %l3 | |
262 | SAVE_ALL | |
263 | ||
264 | /* Set all IRQs off. */ | |
265 | or %l0, PSR_PIL, %l4 | |
266 | wr %l4, 0x0, %psr | |
267 | WRITE_PAUSE | |
268 | wr %l4, PSR_ET, %psr | |
269 | WRITE_PAUSE | |
270 | ||
271 | mov 11, %o0 ! floppy irq level (unused anyway) | |
272 | mov %g0, %o1 ! devid is not used in fast interrupts | |
273 | call sparc_floppy_irq | |
274 | add %sp, STACKFRAME_SZ, %o2 ! struct pt_regs *regs | |
275 | ||
276 | RESTORE_ALL | |
277 | ||
278 | #endif /* (CONFIG_BLK_DEV_FD) */ | |
279 | ||
280 | /* Bad trap handler */ | |
281 | .globl bad_trap_handler | |
282 | bad_trap_handler: | |
283 | SAVE_ALL | |
284 | ||
285 | wr %l0, PSR_ET, %psr | |
286 | WRITE_PAUSE | |
287 | ||
288 | add %sp, STACKFRAME_SZ, %o0 ! pt_regs | |
289 | call do_hw_interrupt | |
290 | mov %l7, %o1 ! trap number | |
291 | ||
292 | RESTORE_ALL | |
293 | ||
294 | /* For now all IRQ's not registered get sent here. handler_irq() will | |
295 | * see if a routine is registered to handle this interrupt and if not | |
296 | * it will say so on the console. | |
297 | */ | |
298 | ||
299 | .align 4 | |
300 | .globl real_irq_entry, patch_handler_irq | |
301 | real_irq_entry: | |
302 | SAVE_ALL | |
303 | ||
304 | #ifdef CONFIG_SMP | |
305 | .globl patchme_maybe_smp_msg | |
306 | ||
307 | cmp %l7, 12 | |
308 | patchme_maybe_smp_msg: | |
309 | bgu maybe_smp4m_msg | |
310 | nop | |
311 | #endif | |
312 | ||
313 | real_irq_continue: | |
314 | or %l0, PSR_PIL, %g2 | |
315 | wr %g2, 0x0, %psr | |
316 | WRITE_PAUSE | |
317 | wr %g2, PSR_ET, %psr | |
318 | WRITE_PAUSE | |
319 | mov %l7, %o0 ! irq level | |
320 | patch_handler_irq: | |
321 | call handler_irq | |
322 | add %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr | |
323 | or %l0, PSR_PIL, %g2 ! restore PIL after handler_irq | |
324 | wr %g2, PSR_ET, %psr ! keep ET up | |
325 | WRITE_PAUSE | |
326 | ||
327 | RESTORE_ALL | |
328 | ||
329 | #ifdef CONFIG_SMP | |
330 | /* SMP per-cpu ticker interrupts are handled specially. */ | |
331 | smp4m_ticker: | |
332 | bne real_irq_continue+4 | |
333 | or %l0, PSR_PIL, %g2 | |
334 | wr %g2, 0x0, %psr | |
335 | WRITE_PAUSE | |
336 | wr %g2, PSR_ET, %psr | |
337 | WRITE_PAUSE | |
338 | call smp4m_percpu_timer_interrupt | |
339 | add %sp, STACKFRAME_SZ, %o0 | |
340 | wr %l0, PSR_ET, %psr | |
341 | WRITE_PAUSE | |
342 | RESTORE_ALL | |
343 | ||
344 | /* Here is where we check for possible SMP IPI passed to us | |
345 | * on some level other than 15 which is the NMI and only used | |
346 | * for cross calls. That has a separate entry point below. | |
347 | */ | |
348 | maybe_smp4m_msg: | |
349 | GET_PROCESSOR4M_ID(o3) | |
350 | set sun4m_interrupts, %l5 | |
351 | ld [%l5], %o5 | |
352 | sethi %hi(0x40000000), %o2 | |
353 | sll %o3, 12, %o3 | |
354 | ld [%o5 + %o3], %o1 | |
355 | andcc %o1, %o2, %g0 | |
356 | be,a smp4m_ticker | |
357 | cmp %l7, 14 | |
358 | st %o2, [%o5 + 0x4] | |
359 | WRITE_PAUSE | |
360 | ld [%o5], %g0 | |
361 | WRITE_PAUSE | |
362 | or %l0, PSR_PIL, %l4 | |
363 | wr %l4, 0x0, %psr | |
364 | WRITE_PAUSE | |
365 | wr %l4, PSR_ET, %psr | |
366 | WRITE_PAUSE | |
367 | call smp_reschedule_irq | |
368 | nop | |
369 | ||
370 | RESTORE_ALL | |
371 | ||
372 | .align 4 | |
373 | .globl linux_trap_ipi15_sun4m | |
374 | linux_trap_ipi15_sun4m: | |
375 | SAVE_ALL | |
376 | sethi %hi(0x80000000), %o2 | |
377 | GET_PROCESSOR4M_ID(o0) | |
378 | set sun4m_interrupts, %l5 | |
379 | ld [%l5], %o5 | |
380 | sll %o0, 12, %o0 | |
381 | add %o5, %o0, %o5 | |
382 | ld [%o5], %o3 | |
383 | andcc %o3, %o2, %g0 | |
384 | be 1f ! Must be an NMI async memory error | |
385 | st %o2, [%o5 + 4] | |
386 | WRITE_PAUSE | |
387 | ld [%o5], %g0 | |
388 | WRITE_PAUSE | |
389 | or %l0, PSR_PIL, %l4 | |
390 | wr %l4, 0x0, %psr | |
391 | WRITE_PAUSE | |
392 | wr %l4, PSR_ET, %psr | |
393 | WRITE_PAUSE | |
394 | call smp4m_cross_call_irq | |
395 | nop | |
396 | b ret_trap_lockless_ipi | |
397 | clr %l6 | |
398 | 1: | |
399 | /* NMI async memory error handling. */ | |
400 | sethi %hi(0x80000000), %l4 | |
401 | sethi %hi(0x4000), %o3 | |
402 | sub %o5, %o0, %o5 | |
403 | add %o5, %o3, %l5 | |
404 | st %l4, [%l5 + 0xc] | |
405 | WRITE_PAUSE | |
406 | ld [%l5], %g0 | |
407 | WRITE_PAUSE | |
408 | or %l0, PSR_PIL, %l4 | |
409 | wr %l4, 0x0, %psr | |
410 | WRITE_PAUSE | |
411 | wr %l4, PSR_ET, %psr | |
412 | WRITE_PAUSE | |
413 | call sun4m_nmi | |
414 | nop | |
415 | st %l4, [%l5 + 0x8] | |
416 | WRITE_PAUSE | |
417 | ld [%l5], %g0 | |
418 | WRITE_PAUSE | |
419 | RESTORE_ALL | |
420 | ||
421 | .globl smp4d_ticker | |
422 | /* SMP per-cpu ticker interrupts are handled specially. */ | |
423 | smp4d_ticker: | |
424 | SAVE_ALL | |
425 | or %l0, PSR_PIL, %g2 | |
426 | sethi %hi(CC_ICLR), %o0 | |
427 | sethi %hi(1 << 14), %o1 | |
428 | or %o0, %lo(CC_ICLR), %o0 | |
429 | stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 14 in MXCC's ICLR */ | |
430 | wr %g2, 0x0, %psr | |
431 | WRITE_PAUSE | |
432 | wr %g2, PSR_ET, %psr | |
433 | WRITE_PAUSE | |
434 | call smp4d_percpu_timer_interrupt | |
435 | add %sp, STACKFRAME_SZ, %o0 | |
436 | wr %l0, PSR_ET, %psr | |
437 | WRITE_PAUSE | |
438 | RESTORE_ALL | |
439 | ||
440 | .align 4 | |
441 | .globl linux_trap_ipi15_sun4d | |
442 | linux_trap_ipi15_sun4d: | |
443 | SAVE_ALL | |
444 | sethi %hi(CC_BASE), %o4 | |
445 | sethi %hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2 | |
446 | or %o4, (CC_EREG - CC_BASE), %o0 | |
447 | ldda [%o0] ASI_M_MXCC, %o0 | |
448 | andcc %o0, %o2, %g0 | |
449 | bne 1f | |
450 | sethi %hi(BB_STAT2), %o2 | |
451 | lduba [%o2] ASI_M_CTL, %o2 | |
452 | andcc %o2, BB_STAT2_MASK, %g0 | |
453 | bne 2f | |
454 | or %o4, (CC_ICLR - CC_BASE), %o0 | |
455 | sethi %hi(1 << 15), %o1 | |
456 | stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 15 in MXCC's ICLR */ | |
457 | or %l0, PSR_PIL, %l4 | |
458 | wr %l4, 0x0, %psr | |
459 | WRITE_PAUSE | |
460 | wr %l4, PSR_ET, %psr | |
461 | WRITE_PAUSE | |
462 | call smp4d_cross_call_irq | |
463 | nop | |
464 | b ret_trap_lockless_ipi | |
465 | clr %l6 | |
466 | ||
467 | 1: /* MXCC error */ | |
468 | 2: /* BB error */ | |
469 | /* Disable PIL 15 */ | |
470 | set CC_IMSK, %l4 | |
471 | lduha [%l4] ASI_M_MXCC, %l5 | |
472 | sethi %hi(1 << 15), %l7 | |
473 | or %l5, %l7, %l5 | |
474 | stha %l5, [%l4] ASI_M_MXCC | |
475 | /* FIXME */ | |
476 | 1: b,a 1b | |
477 | ||
478 | #endif /* CONFIG_SMP */ | |
479 | ||
480 | /* This routine handles illegal instructions and privileged | |
481 | * instruction attempts from user code. | |
482 | */ | |
483 | .align 4 | |
484 | .globl bad_instruction | |
485 | bad_instruction: | |
486 | sethi %hi(0xc1f80000), %l4 | |
487 | ld [%l1], %l5 | |
488 | sethi %hi(0x81d80000), %l7 | |
489 | and %l5, %l4, %l5 | |
490 | cmp %l5, %l7 | |
491 | be 1f | |
492 | SAVE_ALL | |
493 | ||
494 | wr %l0, PSR_ET, %psr ! re-enable traps | |
495 | WRITE_PAUSE | |
496 | ||
497 | add %sp, STACKFRAME_SZ, %o0 | |
498 | mov %l1, %o1 | |
499 | mov %l2, %o2 | |
500 | call do_illegal_instruction | |
501 | mov %l0, %o3 | |
502 | ||
503 | RESTORE_ALL | |
504 | ||
505 | 1: /* unimplemented flush - just skip */ | |
506 | jmpl %l2, %g0 | |
507 | rett %l2 + 4 | |
508 | ||
509 | .align 4 | |
510 | .globl priv_instruction | |
511 | priv_instruction: | |
512 | SAVE_ALL | |
513 | ||
514 | wr %l0, PSR_ET, %psr | |
515 | WRITE_PAUSE | |
516 | ||
517 | add %sp, STACKFRAME_SZ, %o0 | |
518 | mov %l1, %o1 | |
519 | mov %l2, %o2 | |
520 | call do_priv_instruction | |
521 | mov %l0, %o3 | |
522 | ||
523 | RESTORE_ALL | |
524 | ||
525 | /* This routine handles unaligned data accesses. */ | |
526 | .align 4 | |
527 | .globl mna_handler | |
528 | mna_handler: | |
529 | andcc %l0, PSR_PS, %g0 | |
530 | be mna_fromuser | |
531 | nop | |
532 | ||
533 | SAVE_ALL | |
534 | ||
535 | wr %l0, PSR_ET, %psr | |
536 | WRITE_PAUSE | |
537 | ||
538 | ld [%l1], %o1 | |
539 | call kernel_unaligned_trap | |
540 | add %sp, STACKFRAME_SZ, %o0 | |
541 | ||
542 | RESTORE_ALL | |
543 | ||
544 | mna_fromuser: | |
545 | SAVE_ALL | |
546 | ||
547 | wr %l0, PSR_ET, %psr ! re-enable traps | |
548 | WRITE_PAUSE | |
549 | ||
550 | ld [%l1], %o1 | |
551 | call user_unaligned_trap | |
552 | add %sp, STACKFRAME_SZ, %o0 | |
553 | ||
554 | RESTORE_ALL | |
555 | ||
556 | /* This routine handles floating point disabled traps. */ | |
557 | .align 4 | |
558 | .globl fpd_trap_handler | |
559 | fpd_trap_handler: | |
560 | SAVE_ALL | |
561 | ||
562 | wr %l0, PSR_ET, %psr ! re-enable traps | |
563 | WRITE_PAUSE | |
564 | ||
565 | add %sp, STACKFRAME_SZ, %o0 | |
566 | mov %l1, %o1 | |
567 | mov %l2, %o2 | |
568 | call do_fpd_trap | |
569 | mov %l0, %o3 | |
570 | ||
571 | RESTORE_ALL | |
572 | ||
573 | /* This routine handles Floating Point Exceptions. */ | |
574 | .align 4 | |
575 | .globl fpe_trap_handler | |
576 | fpe_trap_handler: | |
577 | set fpsave_magic, %l5 | |
578 | cmp %l1, %l5 | |
579 | be 1f | |
580 | sethi %hi(fpsave), %l5 | |
581 | or %l5, %lo(fpsave), %l5 | |
582 | cmp %l1, %l5 | |
583 | bne 2f | |
584 | sethi %hi(fpsave_catch2), %l5 | |
585 | or %l5, %lo(fpsave_catch2), %l5 | |
586 | wr %l0, 0x0, %psr | |
587 | WRITE_PAUSE | |
588 | jmp %l5 | |
589 | rett %l5 + 4 | |
590 | 1: | |
591 | sethi %hi(fpsave_catch), %l5 | |
592 | or %l5, %lo(fpsave_catch), %l5 | |
593 | wr %l0, 0x0, %psr | |
594 | WRITE_PAUSE | |
595 | jmp %l5 | |
596 | rett %l5 + 4 | |
597 | ||
598 | 2: | |
599 | SAVE_ALL | |
600 | ||
601 | wr %l0, PSR_ET, %psr ! re-enable traps | |
602 | WRITE_PAUSE | |
603 | ||
604 | add %sp, STACKFRAME_SZ, %o0 | |
605 | mov %l1, %o1 | |
606 | mov %l2, %o2 | |
607 | call do_fpe_trap | |
608 | mov %l0, %o3 | |
609 | ||
610 | RESTORE_ALL | |
611 | ||
612 | /* This routine handles Tag Overflow Exceptions. */ | |
613 | .align 4 | |
614 | .globl do_tag_overflow | |
615 | do_tag_overflow: | |
616 | SAVE_ALL | |
617 | ||
618 | wr %l0, PSR_ET, %psr ! re-enable traps | |
619 | WRITE_PAUSE | |
620 | ||
621 | add %sp, STACKFRAME_SZ, %o0 | |
622 | mov %l1, %o1 | |
623 | mov %l2, %o2 | |
624 | call handle_tag_overflow | |
625 | mov %l0, %o3 | |
626 | ||
627 | RESTORE_ALL | |
628 | ||
629 | /* This routine handles Watchpoint Exceptions. */ | |
630 | .align 4 | |
631 | .globl do_watchpoint | |
632 | do_watchpoint: | |
633 | SAVE_ALL | |
634 | ||
635 | wr %l0, PSR_ET, %psr ! re-enable traps | |
636 | WRITE_PAUSE | |
637 | ||
638 | add %sp, STACKFRAME_SZ, %o0 | |
639 | mov %l1, %o1 | |
640 | mov %l2, %o2 | |
641 | call handle_watchpoint | |
642 | mov %l0, %o3 | |
643 | ||
644 | RESTORE_ALL | |
645 | ||
646 | /* This routine handles Register Access Exceptions. */ | |
647 | .align 4 | |
648 | .globl do_reg_access | |
649 | do_reg_access: | |
650 | SAVE_ALL | |
651 | ||
652 | wr %l0, PSR_ET, %psr ! re-enable traps | |
653 | WRITE_PAUSE | |
654 | ||
655 | add %sp, STACKFRAME_SZ, %o0 | |
656 | mov %l1, %o1 | |
657 | mov %l2, %o2 | |
658 | call handle_reg_access | |
659 | mov %l0, %o3 | |
660 | ||
661 | RESTORE_ALL | |
662 | ||
663 | /* This routine handles Co-Processor Disabled Exceptions. */ | |
664 | .align 4 | |
665 | .globl do_cp_disabled | |
666 | do_cp_disabled: | |
667 | SAVE_ALL | |
668 | ||
669 | wr %l0, PSR_ET, %psr ! re-enable traps | |
670 | WRITE_PAUSE | |
671 | ||
672 | add %sp, STACKFRAME_SZ, %o0 | |
673 | mov %l1, %o1 | |
674 | mov %l2, %o2 | |
675 | call handle_cp_disabled | |
676 | mov %l0, %o3 | |
677 | ||
678 | RESTORE_ALL | |
679 | ||
680 | /* This routine handles Co-Processor Exceptions. */ | |
681 | .align 4 | |
682 | .globl do_cp_exception | |
683 | do_cp_exception: | |
684 | SAVE_ALL | |
685 | ||
686 | wr %l0, PSR_ET, %psr ! re-enable traps | |
687 | WRITE_PAUSE | |
688 | ||
689 | add %sp, STACKFRAME_SZ, %o0 | |
690 | mov %l1, %o1 | |
691 | mov %l2, %o2 | |
692 | call handle_cp_exception | |
693 | mov %l0, %o3 | |
694 | ||
695 | RESTORE_ALL | |
696 | ||
697 | /* This routine handles Hardware Divide By Zero Exceptions. */ | |
698 | .align 4 | |
699 | .globl do_hw_divzero | |
700 | do_hw_divzero: | |
701 | SAVE_ALL | |
702 | ||
703 | wr %l0, PSR_ET, %psr ! re-enable traps | |
704 | WRITE_PAUSE | |
705 | ||
706 | add %sp, STACKFRAME_SZ, %o0 | |
707 | mov %l1, %o1 | |
708 | mov %l2, %o2 | |
709 | call handle_hw_divzero | |
710 | mov %l0, %o3 | |
711 | ||
712 | RESTORE_ALL | |
713 | ||
714 | .align 4 | |
715 | .globl do_flush_windows | |
716 | do_flush_windows: | |
717 | SAVE_ALL | |
718 | ||
719 | wr %l0, PSR_ET, %psr | |
720 | WRITE_PAUSE | |
721 | ||
722 | andcc %l0, PSR_PS, %g0 | |
723 | bne dfw_kernel | |
724 | nop | |
725 | ||
726 | call flush_user_windows | |
727 | nop | |
728 | ||
729 | /* Advance over the trap instruction. */ | |
730 | ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 | |
731 | add %l1, 0x4, %l2 | |
732 | st %l1, [%sp + STACKFRAME_SZ + PT_PC] | |
733 | st %l2, [%sp + STACKFRAME_SZ + PT_NPC] | |
734 | ||
735 | RESTORE_ALL | |
736 | ||
737 | .globl flush_patch_one | |
738 | ||
739 | /* We get these for debugging routines using __builtin_return_address() */ | |
740 | dfw_kernel: | |
741 | flush_patch_one: | |
742 | FLUSH_ALL_KERNEL_WINDOWS | |
743 | ||
744 | /* Advance over the trap instruction. */ | |
745 | ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 | |
746 | add %l1, 0x4, %l2 | |
747 | st %l1, [%sp + STACKFRAME_SZ + PT_PC] | |
748 | st %l2, [%sp + STACKFRAME_SZ + PT_NPC] | |
749 | ||
750 | RESTORE_ALL | |
751 | ||
752 | /* The getcc software trap. The user wants the condition codes from | |
753 | * the %psr in register %g1. | |
754 | */ | |
755 | ||
756 | .align 4 | |
757 | .globl getcc_trap_handler | |
758 | getcc_trap_handler: | |
759 | srl %l0, 20, %g1 ! give user | |
760 | and %g1, 0xf, %g1 ! only ICC bits in %psr | |
761 | jmp %l2 ! advance over trap instruction | |
762 | rett %l2 + 0x4 ! like this... | |
763 | ||
764 | /* The setcc software trap. The user has condition codes in %g1 | |
765 | * that it would like placed in the %psr. Be careful not to flip | |
766 | * any unintentional bits! | |
767 | */ | |
768 | ||
769 | .align 4 | |
770 | .globl setcc_trap_handler | |
771 | setcc_trap_handler: | |
772 | sll %g1, 0x14, %l4 | |
773 | set PSR_ICC, %l5 | |
774 | andn %l0, %l5, %l0 ! clear ICC bits in %psr | |
775 | and %l4, %l5, %l4 ! clear non-ICC bits in user value | |
776 | or %l4, %l0, %l4 ! or them in... mix mix mix | |
777 | ||
778 | wr %l4, 0x0, %psr ! set new %psr | |
779 | WRITE_PAUSE ! TI scumbags... | |
780 | ||
781 | jmp %l2 ! advance over trap instruction | |
782 | rett %l2 + 0x4 ! like this... | |
783 | ||
784 | .align 4 | |
785 | .globl linux_trap_nmi_sun4c | |
786 | linux_trap_nmi_sun4c: | |
787 | SAVE_ALL | |
788 | ||
789 | /* Ugh, we need to clear the IRQ line. This is now | |
790 | * a very sun4c specific trap handler... | |
791 | */ | |
792 | sethi %hi(interrupt_enable), %l5 | |
793 | ld [%l5 + %lo(interrupt_enable)], %l5 | |
794 | ldub [%l5], %l6 | |
795 | andn %l6, INTS_ENAB, %l6 | |
796 | stb %l6, [%l5] | |
797 | ||
798 | /* Now it is safe to re-enable traps without recursion. */ | |
799 | or %l0, PSR_PIL, %l0 | |
800 | wr %l0, PSR_ET, %psr | |
801 | WRITE_PAUSE | |
802 | ||
803 | /* Now call the c-code with the pt_regs frame ptr and the | |
804 | * memory error registers as arguments. The ordering chosen | |
805 | * here is due to unlatching semantics. | |
806 | */ | |
807 | sethi %hi(AC_SYNC_ERR), %o0 | |
808 | add %o0, 0x4, %o0 | |
809 | lda [%o0] ASI_CONTROL, %o2 ! sync vaddr | |
810 | sub %o0, 0x4, %o0 | |
811 | lda [%o0] ASI_CONTROL, %o1 ! sync error | |
812 | add %o0, 0xc, %o0 | |
813 | lda [%o0] ASI_CONTROL, %o4 ! async vaddr | |
814 | sub %o0, 0x4, %o0 | |
815 | lda [%o0] ASI_CONTROL, %o3 ! async error | |
816 | call sparc_lvl15_nmi | |
817 | add %sp, STACKFRAME_SZ, %o0 | |
818 | ||
819 | RESTORE_ALL | |
820 | ||
821 | .align 4 | |
822 | .globl invalid_segment_patch1_ff | |
823 | .globl invalid_segment_patch2_ff | |
824 | invalid_segment_patch1_ff: cmp %l4, 0xff | |
825 | invalid_segment_patch2_ff: mov 0xff, %l3 | |
826 | ||
827 | .align 4 | |
828 | .globl invalid_segment_patch1_1ff | |
829 | .globl invalid_segment_patch2_1ff | |
830 | invalid_segment_patch1_1ff: cmp %l4, 0x1ff | |
831 | invalid_segment_patch2_1ff: mov 0x1ff, %l3 | |
832 | ||
833 | .align 4 | |
834 | .globl num_context_patch1_16, num_context_patch2_16 | |
835 | num_context_patch1_16: mov 0x10, %l7 | |
836 | num_context_patch2_16: mov 0x10, %l7 | |
837 | ||
838 | .align 4 | |
839 | .globl vac_linesize_patch_32 | |
840 | vac_linesize_patch_32: subcc %l7, 32, %l7 | |
841 | ||
842 | .align 4 | |
843 | .globl vac_hwflush_patch1_on, vac_hwflush_patch2_on | |
844 | ||
845 | /* | |
846 | * Ugly, but we can't use hardware flushing on the sun4 and we'd require | |
847 | * two instructions (Anton) | |
848 | */ | |
849 | #ifdef CONFIG_SUN4 | |
850 | vac_hwflush_patch1_on: nop | |
851 | #else | |
852 | vac_hwflush_patch1_on: addcc %l7, -PAGE_SIZE, %l7 | |
853 | #endif | |
854 | ||
855 | vac_hwflush_patch2_on: sta %g0, [%l3 + %l7] ASI_HWFLUSHSEG | |
856 | ||
857 | .globl invalid_segment_patch1, invalid_segment_patch2 | |
858 | .globl num_context_patch1 | |
859 | .globl vac_linesize_patch, vac_hwflush_patch1 | |
860 | .globl vac_hwflush_patch2 | |
861 | ||
862 | .align 4 | |
863 | .globl sun4c_fault | |
864 | ||
865 | ! %l0 = %psr | |
866 | ! %l1 = %pc | |
867 | ! %l2 = %npc | |
868 | ! %l3 = %wim | |
869 | ! %l7 = 1 for textfault | |
870 | ! We want error in %l5, vaddr in %l6 | |
871 | sun4c_fault: | |
872 | #ifdef CONFIG_SUN4 | |
873 | sethi %hi(sun4c_memerr_reg), %l4 | |
874 | ld [%l4+%lo(sun4c_memerr_reg)], %l4 ! memerr ctrl reg addr | |
875 | ld [%l4], %l6 ! memerr ctrl reg | |
876 | ld [%l4 + 4], %l5 ! memerr vaddr reg | |
877 | andcc %l6, 0x80, %g0 ! check for error type | |
878 | st %g0, [%l4 + 4] ! clear the error | |
879 | be 0f ! normal error | |
880 | sethi %hi(AC_BUS_ERROR), %l4 ! bus err reg addr | |
881 | ||
882 | call prom_halt ! something weird happened | |
883 | ! what exactly did happen? | |
884 | ! what should we do here? | |
885 | ||
886 | 0: or %l4, %lo(AC_BUS_ERROR), %l4 ! bus err reg addr | |
887 | lduba [%l4] ASI_CONTROL, %l6 ! bus err reg | |
888 | ||
889 | cmp %l7, 1 ! text fault? | |
890 | be 1f ! yes | |
891 | nop | |
892 | ||
893 | ld [%l1], %l4 ! load instruction that caused fault | |
894 | srl %l4, 21, %l4 | |
895 | andcc %l4, 1, %g0 ! store instruction? | |
896 | ||
897 | be 1f ! no | |
898 | sethi %hi(SUN4C_SYNC_BADWRITE), %l4 ! yep | |
899 | ! %lo(SUN4C_SYNC_BADWRITE) = 0 | |
900 | or %l4, %l6, %l6 ! set write bit to emulate sun4c | |
901 | 1: | |
902 | #else | |
903 | sethi %hi(AC_SYNC_ERR), %l4 | |
904 | add %l4, 0x4, %l6 ! AC_SYNC_VA in %l6 | |
905 | lda [%l6] ASI_CONTROL, %l5 ! Address | |
906 | lda [%l4] ASI_CONTROL, %l6 ! Error, retained for a bit | |
907 | #endif | |
908 | ||
909 | andn %l5, 0xfff, %l5 ! Encode all info into l7 | |
910 | srl %l6, 14, %l4 | |
911 | ||
912 | and %l4, 2, %l4 | |
913 | or %l5, %l4, %l4 | |
914 | ||
915 | or %l4, %l7, %l7 ! l7 = [addr,write,txtfault] | |
916 | ||
917 | andcc %l0, PSR_PS, %g0 | |
918 | be sun4c_fault_fromuser | |
919 | andcc %l7, 1, %g0 ! Text fault? | |
920 | ||
921 | be 1f | |
922 | sethi %hi(KERNBASE), %l4 | |
923 | ||
924 | mov %l1, %l5 ! PC | |
925 | ||
926 | 1: | |
927 | cmp %l5, %l4 | |
928 | blu sun4c_fault_fromuser | |
929 | sethi %hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4 | |
930 | ||
931 | /* If the kernel references a bum kernel pointer, or a pte which | |
932 | * points to a nonexistent page in ram, we will run this code | |
933 | * _forever_ and lock up the machine!!!!! So we must check for | |
934 | * this condition, the AC_SYNC_ERR bits are what we must examine. | |
935 | * Also a parity error would make this happen as well. So we just | |
936 | * check that we are in fact servicing a tlb miss and not some | |
937 | * other type of fault for the kernel. | |
938 | */ | |
939 | andcc %l6, 0x80, %g0 | |
940 | be sun4c_fault_fromuser | |
941 | and %l5, %l4, %l5 | |
942 | ||
943 | /* Test for NULL pte_t * in vmalloc area. */ | |
944 | sethi %hi(VMALLOC_START), %l4 | |
945 | cmp %l5, %l4 | |
946 | blu,a invalid_segment_patch1 | |
947 | lduXa [%l5] ASI_SEGMAP, %l4 | |
948 | ||
949 | sethi %hi(swapper_pg_dir), %l4 | |
950 | srl %l5, SUN4C_PGDIR_SHIFT, %l6 | |
951 | or %l4, %lo(swapper_pg_dir), %l4 | |
952 | sll %l6, 2, %l6 | |
953 | ld [%l4 + %l6], %l4 | |
954 | #ifdef CONFIG_SUN4 | |
955 | sethi %hi(PAGE_MASK), %l6 | |
956 | andcc %l4, %l6, %g0 | |
957 | #else | |
958 | andcc %l4, PAGE_MASK, %g0 | |
959 | #endif | |
960 | be sun4c_fault_fromuser | |
961 | lduXa [%l5] ASI_SEGMAP, %l4 | |
962 | ||
963 | invalid_segment_patch1: | |
964 | cmp %l4, 0x7f | |
965 | bne 1f | |
966 | sethi %hi(sun4c_kfree_ring), %l4 | |
967 | or %l4, %lo(sun4c_kfree_ring), %l4 | |
968 | ld [%l4 + 0x18], %l3 | |
969 | deccc %l3 ! do we have a free entry? | |
970 | bcs,a 2f ! no, unmap one. | |
971 | sethi %hi(sun4c_kernel_ring), %l4 | |
972 | ||
973 | st %l3, [%l4 + 0x18] ! sun4c_kfree_ring.num_entries-- | |
974 | ||
975 | ld [%l4 + 0x00], %l6 ! entry = sun4c_kfree_ring.ringhd.next | |
976 | st %l5, [%l6 + 0x08] ! entry->vaddr = address | |
977 | ||
978 | ld [%l6 + 0x00], %l3 ! next = entry->next | |
979 | ld [%l6 + 0x04], %l7 ! entry->prev | |
980 | ||
981 | st %l7, [%l3 + 0x04] ! next->prev = entry->prev | |
982 | st %l3, [%l7 + 0x00] ! entry->prev->next = next | |
983 | ||
984 | sethi %hi(sun4c_kernel_ring), %l4 | |
985 | or %l4, %lo(sun4c_kernel_ring), %l4 | |
986 | ! head = &sun4c_kernel_ring.ringhd | |
987 | ||
988 | ld [%l4 + 0x00], %l7 ! head->next | |
989 | ||
990 | st %l4, [%l6 + 0x04] ! entry->prev = head | |
991 | st %l7, [%l6 + 0x00] ! entry->next = head->next | |
992 | st %l6, [%l7 + 0x04] ! head->next->prev = entry | |
993 | ||
994 | st %l6, [%l4 + 0x00] ! head->next = entry | |
995 | ||
996 | ld [%l4 + 0x18], %l3 | |
997 | inc %l3 ! sun4c_kernel_ring.num_entries++ | |
998 | st %l3, [%l4 + 0x18] | |
999 | b 4f | |
1000 | ld [%l6 + 0x08], %l5 | |
1001 | ||
1002 | 2: | |
1003 | or %l4, %lo(sun4c_kernel_ring), %l4 | |
1004 | ! head = &sun4c_kernel_ring.ringhd | |
1005 | ||
1006 | ld [%l4 + 0x04], %l6 ! entry = head->prev | |
1007 | ||
1008 | ld [%l6 + 0x08], %l3 ! tmp = entry->vaddr | |
1009 | ||
1010 | ! Flush segment from the cache. | |
1011 | #ifdef CONFIG_SUN4 | |
1012 | sethi %hi((128 * 1024)), %l7 | |
1013 | #else | |
1014 | sethi %hi((64 * 1024)), %l7 | |
1015 | #endif | |
1016 | 9: | |
1017 | vac_hwflush_patch1: | |
1018 | vac_linesize_patch: | |
1019 | subcc %l7, 16, %l7 | |
1020 | bne 9b | |
1021 | vac_hwflush_patch2: | |
1022 | sta %g0, [%l3 + %l7] ASI_FLUSHSEG | |
1023 | ||
1024 | st %l5, [%l6 + 0x08] ! entry->vaddr = address | |
1025 | ||
1026 | ld [%l6 + 0x00], %l5 ! next = entry->next | |
1027 | ld [%l6 + 0x04], %l7 ! entry->prev | |
1028 | ||
1029 | st %l7, [%l5 + 0x04] ! next->prev = entry->prev | |
1030 | st %l5, [%l7 + 0x00] ! entry->prev->next = next | |
1031 | st %l4, [%l6 + 0x04] ! entry->prev = head | |
1032 | ||
1033 | ld [%l4 + 0x00], %l7 ! head->next | |
1034 | ||
1035 | st %l7, [%l6 + 0x00] ! entry->next = head->next | |
1036 | st %l6, [%l7 + 0x04] ! head->next->prev = entry | |
1037 | st %l6, [%l4 + 0x00] ! head->next = entry | |
1038 | ||
1039 | mov %l3, %l5 ! address = tmp | |
1040 | ||
1041 | 4: | |
1042 | num_context_patch1: | |
1043 | mov 0x08, %l7 | |
1044 | ||
1045 | ld [%l6 + 0x08], %l4 | |
1046 | ldub [%l6 + 0x0c], %l3 | |
1047 | or %l4, %l3, %l4 ! encode new vaddr/pseg into l4 | |
1048 | ||
1049 | sethi %hi(AC_CONTEXT), %l3 | |
1050 | lduba [%l3] ASI_CONTROL, %l6 | |
1051 | ||
1052 | /* Invalidate old mapping, instantiate new mapping, | |
1053 | * for each context. Registers l6/l7 are live across | |
1054 | * this loop. | |
1055 | */ | |
1056 | 3: deccc %l7 | |
1057 | sethi %hi(AC_CONTEXT), %l3 | |
1058 | stba %l7, [%l3] ASI_CONTROL | |
1059 | invalid_segment_patch2: | |
1060 | mov 0x7f, %l3 | |
1061 | stXa %l3, [%l5] ASI_SEGMAP | |
1062 | andn %l4, 0x1ff, %l3 | |
1063 | bne 3b | |
1064 | stXa %l4, [%l3] ASI_SEGMAP | |
1065 | ||
1066 | sethi %hi(AC_CONTEXT), %l3 | |
1067 | stba %l6, [%l3] ASI_CONTROL | |
1068 | ||
1069 | andn %l4, 0x1ff, %l5 | |
1070 | ||
1071 | 1: | |
1072 | sethi %hi(VMALLOC_START), %l4 | |
1073 | cmp %l5, %l4 | |
1074 | ||
1075 | bgeu 1f | |
1076 | mov 1 << (SUN4C_REAL_PGDIR_SHIFT - PAGE_SHIFT), %l7 | |
1077 | ||
1078 | sethi %hi(KERNBASE), %l6 | |
1079 | ||
1080 | sub %l5, %l6, %l4 | |
1081 | srl %l4, PAGE_SHIFT, %l4 | |
1082 | sethi %hi((SUN4C_PAGE_KERNEL & 0xf4000000)), %l3 | |
1083 | or %l3, %l4, %l3 | |
1084 | ||
1085 | sethi %hi(PAGE_SIZE), %l4 | |
1086 | ||
1087 | 2: | |
1088 | sta %l3, [%l5] ASI_PTE | |
1089 | deccc %l7 | |
1090 | inc %l3 | |
1091 | bne 2b | |
1092 | add %l5, %l4, %l5 | |
1093 | ||
1094 | b 7f | |
1095 | sethi %hi(sun4c_kernel_faults), %l4 | |
1096 | ||
1097 | 1: | |
1098 | srl %l5, SUN4C_PGDIR_SHIFT, %l3 | |
1099 | sethi %hi(swapper_pg_dir), %l4 | |
1100 | or %l4, %lo(swapper_pg_dir), %l4 | |
1101 | sll %l3, 2, %l3 | |
1102 | ld [%l4 + %l3], %l4 | |
1103 | #ifndef CONFIG_SUN4 | |
1104 | and %l4, PAGE_MASK, %l4 | |
1105 | #else | |
1106 | sethi %hi(PAGE_MASK), %l6 | |
1107 | and %l4, %l6, %l4 | |
1108 | #endif | |
1109 | ||
1110 | srl %l5, (PAGE_SHIFT - 2), %l6 | |
1111 | and %l6, ((SUN4C_PTRS_PER_PTE - 1) << 2), %l6 | |
1112 | add %l6, %l4, %l6 | |
1113 | ||
1114 | sethi %hi(PAGE_SIZE), %l4 | |
1115 | ||
1116 | 2: | |
1117 | ld [%l6], %l3 | |
1118 | deccc %l7 | |
1119 | sta %l3, [%l5] ASI_PTE | |
1120 | add %l6, 0x4, %l6 | |
1121 | bne 2b | |
1122 | add %l5, %l4, %l5 | |
1123 | ||
1124 | sethi %hi(sun4c_kernel_faults), %l4 | |
1125 | 7: | |
1126 | ld [%l4 + %lo(sun4c_kernel_faults)], %l3 | |
1127 | inc %l3 | |
1128 | st %l3, [%l4 + %lo(sun4c_kernel_faults)] | |
1129 | ||
1130 | /* Restore condition codes */ | |
1131 | wr %l0, 0x0, %psr | |
1132 | WRITE_PAUSE | |
1133 | jmp %l1 | |
1134 | rett %l2 | |
1135 | ||
1136 | sun4c_fault_fromuser: | |
1137 | SAVE_ALL | |
1138 | nop | |
1139 | ||
1140 | mov %l7, %o1 ! Decode the info from %l7 | |
1141 | mov %l7, %o2 | |
1142 | and %o1, 1, %o1 ! arg2 = text_faultp | |
1143 | mov %l7, %o3 | |
1144 | and %o2, 2, %o2 ! arg3 = writep | |
1145 | andn %o3, 0xfff, %o3 ! arg4 = faulting address | |
1146 | ||
1147 | wr %l0, PSR_ET, %psr | |
1148 | WRITE_PAUSE | |
1149 | ||
1150 | call do_sun4c_fault | |
1151 | add %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr | |
1152 | ||
1153 | RESTORE_ALL | |
1154 | ||
1155 | .align 4 | |
1156 | .globl srmmu_fault | |
1157 | srmmu_fault: | |
1158 | mov 0x400, %l5 | |
1159 | mov 0x300, %l4 | |
1160 | ||
1161 | lda [%l5] ASI_M_MMUREGS, %l6 ! read sfar first | |
1162 | lda [%l4] ASI_M_MMUREGS, %l5 ! read sfsr last | |
1163 | ||
1164 | andn %l6, 0xfff, %l6 | |
1165 | srl %l5, 6, %l5 ! and encode all info into l7 | |
1166 | ||
1167 | and %l5, 2, %l5 | |
1168 | or %l5, %l6, %l6 | |
1169 | ||
1170 | or %l6, %l7, %l7 ! l7 = [addr,write,txtfault] | |
1171 | ||
1172 | SAVE_ALL | |
1173 | ||
1174 | mov %l7, %o1 | |
1175 | mov %l7, %o2 | |
1176 | and %o1, 1, %o1 ! arg2 = text_faultp | |
1177 | mov %l7, %o3 | |
1178 | and %o2, 2, %o2 ! arg3 = writep | |
1179 | andn %o3, 0xfff, %o3 ! arg4 = faulting address | |
1180 | ||
1181 | wr %l0, PSR_ET, %psr | |
1182 | WRITE_PAUSE | |
1183 | ||
1184 | call do_sparc_fault | |
1185 | add %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr | |
1186 | ||
1187 | RESTORE_ALL | |
1188 | ||
#ifdef CONFIG_SUNOS_EMUL
	/* SunOS uses syscall zero as the 'indirect syscall': it looks
	 * like indir_syscall(scall_num, arg0, arg1, arg2...).  Shift the
	 * argument registers down one slot and tail-call the real
	 * handler out of sunos_sys_table (or sunos_nosys if the number
	 * is out of range).
	 */
	.globl	sunos_indir
sunos_indir:
	mov	%o7, %l4			! preserve return address
	cmp	%o0, NR_SYSCALLS
	blu,a	1f				! in range: annulled-slot shift
	 sll	%o0, 0x2, %o0			! table index = nr * 4

	sethi	%hi(sunos_nosys), %l6		! out of range -> sunos_nosys
	b	2f
	 or	%l6, %lo(sunos_nosys), %l6

1:
	set	sunos_sys_table, %l7
	ld	[%l7 + %o0], %l6		! fetch handler pointer

2:
	mov	%o1, %o0			! shift args down one register
	mov	%o2, %o1
	mov	%o3, %o2
	mov	%o4, %o3
	mov	%o5, %o4
	call	%l6
	 mov	%l4, %o7			! tail-call: restore caller's o7
#endif

1219 | .align 4 | |
1220 | .globl sys_nis_syscall | |
1221 | sys_nis_syscall: | |
1222 | mov %o7, %l5 | |
1223 | add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg | |
1224 | call c_sys_nis_syscall | |
1225 | mov %l5, %o7 | |
1226 | ||
1da177e4 LT |
1227 | .align 4 |
1228 | .globl sys_execve | |
1229 | sys_execve: | |
1230 | mov %o7, %l5 | |
1231 | add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg | |
1232 | call sparc_execve | |
1233 | mov %l5, %o7 | |
1234 | ||
1235 | .align 4 | |
1236 | .globl sys_pipe | |
1237 | sys_pipe: | |
1238 | mov %o7, %l5 | |
1239 | add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg | |
1240 | call sparc_pipe | |
1241 | mov %l5, %o7 | |
1242 | ||
1243 | .align 4 | |
1244 | .globl sys_sigaltstack | |
1245 | sys_sigaltstack: | |
1246 | mov %o7, %l5 | |
1247 | mov %fp, %o2 | |
1248 | call do_sigaltstack | |
1249 | mov %l5, %o7 | |
1250 | ||
1251 | .align 4 | |
1252 | .globl sys_sigstack | |
1253 | sys_sigstack: | |
1254 | mov %o7, %l5 | |
1255 | mov %fp, %o2 | |
1256 | call do_sys_sigstack | |
1257 | mov %l5, %o7 | |
1258 | ||
1da177e4 LT |
1259 | .align 4 |
1260 | .globl sys_sigreturn | |
1261 | sys_sigreturn: | |
1262 | call do_sigreturn | |
1263 | add %sp, STACKFRAME_SZ, %o0 | |
1264 | ||
1265 | ld [%curptr + TI_FLAGS], %l5 | |
1266 | andcc %l5, _TIF_SYSCALL_TRACE, %g0 | |
1267 | be 1f | |
1268 | nop | |
1269 | ||
1270 | call syscall_trace | |
1271 | nop | |
1272 | ||
1273 | 1: | |
1274 | /* We don't want to muck with user registers like a | |
1275 | * normal syscall, just return. | |
1276 | */ | |
1277 | RESTORE_ALL | |
1278 | ||
1279 | .align 4 | |
1280 | .globl sys_rt_sigreturn | |
1281 | sys_rt_sigreturn: | |
1282 | call do_rt_sigreturn | |
1283 | add %sp, STACKFRAME_SZ, %o0 | |
1284 | ||
1285 | ld [%curptr + TI_FLAGS], %l5 | |
1286 | andcc %l5, _TIF_SYSCALL_TRACE, %g0 | |
1287 | be 1f | |
1288 | nop | |
1289 | ||
1290 | call syscall_trace | |
1291 | nop | |
1292 | ||
1293 | 1: | |
1294 | /* We are returning to a signal handler. */ | |
1295 | RESTORE_ALL | |
1296 | ||
1297 | /* Now that we have a real sys_clone, sys_fork() is | |
1298 | * implemented in terms of it. Our _real_ implementation | |
1299 | * of SunOS vfork() will use sys_vfork(). | |
1300 | * | |
1301 | * XXX These three should be consolidated into mostly shared | |
1302 | * XXX code just like on sparc64... -DaveM | |
1303 | */ | |
1304 | .align 4 | |
1305 | .globl sys_fork, flush_patch_two | |
1306 | sys_fork: | |
1307 | mov %o7, %l5 | |
1308 | flush_patch_two: | |
1309 | FLUSH_ALL_KERNEL_WINDOWS; | |
1310 | ld [%curptr + TI_TASK], %o4 | |
1311 | rd %psr, %g4 | |
1312 | WRITE_PAUSE | |
1313 | mov SIGCHLD, %o0 ! arg0: clone flags | |
1314 | rd %wim, %g5 | |
1315 | WRITE_PAUSE | |
1316 | mov %fp, %o1 ! arg1: usp | |
1317 | std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr] | |
1318 | add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr | |
1319 | mov 0, %o3 | |
1320 | call sparc_do_fork | |
1321 | mov %l5, %o7 | |
1322 | ||
1323 | /* Whee, kernel threads! */ | |
1324 | .globl sys_clone, flush_patch_three | |
1325 | sys_clone: | |
1326 | mov %o7, %l5 | |
1327 | flush_patch_three: | |
1328 | FLUSH_ALL_KERNEL_WINDOWS; | |
1329 | ld [%curptr + TI_TASK], %o4 | |
1330 | rd %psr, %g4 | |
1331 | WRITE_PAUSE | |
1332 | ||
1333 | /* arg0,1: flags,usp -- loaded already */ | |
1334 | cmp %o1, 0x0 ! Is new_usp NULL? | |
1335 | rd %wim, %g5 | |
1336 | WRITE_PAUSE | |
1337 | be,a 1f | |
1338 | mov %fp, %o1 ! yes, use callers usp | |
1339 | andn %o1, 7, %o1 ! no, align to 8 bytes | |
1340 | 1: | |
1341 | std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr] | |
1342 | add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr | |
1343 | mov 0, %o3 | |
1344 | call sparc_do_fork | |
1345 | mov %l5, %o7 | |
1346 | ||
1347 | /* Whee, real vfork! */ | |
1348 | .globl sys_vfork, flush_patch_four | |
1349 | sys_vfork: | |
1350 | flush_patch_four: | |
1351 | FLUSH_ALL_KERNEL_WINDOWS; | |
1352 | ld [%curptr + TI_TASK], %o4 | |
1353 | rd %psr, %g4 | |
1354 | WRITE_PAUSE | |
1355 | rd %wim, %g5 | |
1356 | WRITE_PAUSE | |
1357 | std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr] | |
1358 | sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0 | |
1359 | mov %fp, %o1 | |
1360 | or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0 | |
1361 | sethi %hi(sparc_do_fork), %l1 | |
1362 | mov 0, %o3 | |
1363 | jmpl %l1 + %lo(sparc_do_fork), %g0 | |
1364 | add %sp, STACKFRAME_SZ, %o2 | |
1365 | ||
1366 | .align 4 | |
1367 | linux_sparc_ni_syscall: | |
1368 | sethi %hi(sys_ni_syscall), %l7 | |
1369 | b syscall_is_too_hard | |
1370 | or %l7, %lo(sys_ni_syscall), %l7 | |
1371 | ||
1372 | linux_fast_syscall: | |
1373 | andn %l7, 3, %l7 | |
1374 | mov %i0, %o0 | |
1375 | mov %i1, %o1 | |
1376 | mov %i2, %o2 | |
1377 | jmpl %l7 + %g0, %g0 | |
1378 | mov %i3, %o3 | |
1379 | ||
1380 | linux_syscall_trace: | |
1381 | call syscall_trace | |
1382 | nop | |
1383 | mov %i0, %o0 | |
1384 | mov %i1, %o1 | |
1385 | mov %i2, %o2 | |
1386 | mov %i3, %o3 | |
1387 | b 2f | |
1388 | mov %i4, %o4 | |
1389 | ||
1390 | .globl ret_from_fork | |
1391 | ret_from_fork: | |
1392 | call schedule_tail | |
1393 | mov %g3, %o0 | |
1394 | b ret_sys_call | |
1395 | ld [%sp + STACKFRAME_SZ + PT_I0], %o0 | |
1396 | ||
1397 | /* Linux native and SunOS system calls enter here... */ | |
1398 | .align 4 | |
1399 | .globl linux_sparc_syscall | |
1400 | linux_sparc_syscall: | |
1401 | /* Direct access to user regs, must faster. */ | |
1402 | cmp %g1, NR_SYSCALLS | |
1403 | bgeu linux_sparc_ni_syscall | |
1404 | sll %g1, 2, %l4 | |
1405 | ld [%l7 + %l4], %l7 | |
1406 | andcc %l7, 1, %g0 | |
1407 | bne linux_fast_syscall | |
1408 | /* Just do first insn from SAVE_ALL in the delay slot */ | |
1409 | ||
1410 | .globl syscall_is_too_hard | |
1411 | syscall_is_too_hard: | |
1412 | SAVE_ALL_HEAD | |
1413 | rd %wim, %l3 | |
1414 | ||
1415 | wr %l0, PSR_ET, %psr | |
1416 | mov %i0, %o0 | |
1417 | mov %i1, %o1 | |
1418 | mov %i2, %o2 | |
1419 | ||
1420 | ld [%curptr + TI_FLAGS], %l5 | |
1421 | mov %i3, %o3 | |
1422 | andcc %l5, _TIF_SYSCALL_TRACE, %g0 | |
1423 | mov %i4, %o4 | |
1424 | bne linux_syscall_trace | |
1425 | mov %i0, %l5 | |
1426 | 2: | |
1427 | call %l7 | |
1428 | mov %i5, %o5 | |
1429 | ||
1430 | st %o0, [%sp + STACKFRAME_SZ + PT_I0] | |
1431 | ||
1432 | .globl ret_sys_call | |
1433 | ret_sys_call: | |
1434 | ld [%curptr + TI_FLAGS], %l6 | |
1435 | cmp %o0, -ERESTART_RESTARTBLOCK | |
1436 | ld [%sp + STACKFRAME_SZ + PT_PSR], %g3 | |
1437 | set PSR_C, %g2 | |
1438 | bgeu 1f | |
1439 | andcc %l6, _TIF_SYSCALL_TRACE, %g0 | |
1440 | ||
1441 | /* System call success, clear Carry condition code. */ | |
1442 | andn %g3, %g2, %g3 | |
1443 | clr %l6 | |
1444 | st %g3, [%sp + STACKFRAME_SZ + PT_PSR] | |
1445 | bne linux_syscall_trace2 | |
1446 | ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */ | |
1447 | add %l1, 0x4, %l2 /* npc = npc+4 */ | |
1448 | st %l1, [%sp + STACKFRAME_SZ + PT_PC] | |
1449 | b ret_trap_entry | |
1450 | st %l2, [%sp + STACKFRAME_SZ + PT_NPC] | |
1451 | 1: | |
1452 | /* System call failure, set Carry condition code. | |
1453 | * Also, get abs(errno) to return to the process. | |
1454 | */ | |
1455 | sub %g0, %o0, %o0 | |
1456 | or %g3, %g2, %g3 | |
1457 | st %o0, [%sp + STACKFRAME_SZ + PT_I0] | |
1458 | mov 1, %l6 | |
1459 | st %g3, [%sp + STACKFRAME_SZ + PT_PSR] | |
1460 | bne linux_syscall_trace2 | |
1461 | ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */ | |
1462 | add %l1, 0x4, %l2 /* npc = npc+4 */ | |
1463 | st %l1, [%sp + STACKFRAME_SZ + PT_PC] | |
1464 | b ret_trap_entry | |
1465 | st %l2, [%sp + STACKFRAME_SZ + PT_NPC] | |
1466 | ||
1467 | linux_syscall_trace2: | |
1468 | call syscall_trace | |
1469 | add %l1, 0x4, %l2 /* npc = npc+4 */ | |
1470 | st %l1, [%sp + STACKFRAME_SZ + PT_PC] | |
1471 | b ret_trap_entry | |
1472 | st %l2, [%sp + STACKFRAME_SZ + PT_NPC] | |
1473 | ||
1474 | ||
1475 | /* | |
1476 | * Solaris system calls and indirect system calls enter here. | |
1477 | * | |
1478 | * I have named the solaris indirect syscalls like that because | |
1479 | * it seems like Solaris has some fast path syscalls that can | |
1480 | * be handled as indirect system calls. - mig | |
1481 | */ | |
1482 | ||
1483 | linux_syscall_for_solaris: | |
1484 | sethi %hi(sys_call_table), %l7 | |
1485 | b linux_sparc_syscall | |
1486 | or %l7, %lo(sys_call_table), %l7 | |
1487 | ||
1488 | .align 4 | |
1489 | .globl solaris_syscall | |
1490 | solaris_syscall: | |
1491 | cmp %g1,59 | |
1492 | be linux_syscall_for_solaris | |
1493 | cmp %g1,2 | |
1494 | be linux_syscall_for_solaris | |
1495 | cmp %g1,42 | |
1496 | be linux_syscall_for_solaris | |
1497 | cmp %g1,119 | |
1498 | be,a linux_syscall_for_solaris | |
1499 | mov 2, %g1 | |
1500 | 1: | |
1501 | SAVE_ALL_HEAD | |
1502 | rd %wim, %l3 | |
1503 | ||
1504 | wr %l0, PSR_ET, %psr | |
1505 | nop | |
1506 | nop | |
1507 | mov %i0, %l5 | |
1508 | ||
1509 | call do_solaris_syscall | |
1510 | add %sp, STACKFRAME_SZ, %o0 | |
1511 | ||
1512 | st %o0, [%sp + STACKFRAME_SZ + PT_I0] | |
1513 | set PSR_C, %g2 | |
1514 | cmp %o0, -ERESTART_RESTARTBLOCK | |
1515 | bgeu 1f | |
1516 | ld [%sp + STACKFRAME_SZ + PT_PSR], %g3 | |
1517 | ||
1518 | /* System call success, clear Carry condition code. */ | |
1519 | andn %g3, %g2, %g3 | |
1520 | clr %l6 | |
1521 | b 2f | |
1522 | st %g3, [%sp + STACKFRAME_SZ + PT_PSR] | |
1523 | ||
1524 | 1: | |
1525 | /* System call failure, set Carry condition code. | |
1526 | * Also, get abs(errno) to return to the process. | |
1527 | */ | |
1528 | sub %g0, %o0, %o0 | |
1529 | mov 1, %l6 | |
1530 | st %o0, [%sp + STACKFRAME_SZ + PT_I0] | |
1531 | or %g3, %g2, %g3 | |
1532 | st %g3, [%sp + STACKFRAME_SZ + PT_PSR] | |
1533 | ||
1534 | /* Advance the pc and npc over the trap instruction. | |
1535 | * If the npc is unaligned (has a 1 in the lower byte), it means | |
1536 | * the kernel does not want us to play magic (ie, skipping over | |
1537 | * traps). Mainly when the Solaris code wants to set some PC and | |
1538 | * nPC (setcontext). | |
1539 | */ | |
1540 | 2: | |
1541 | ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */ | |
1542 | andcc %l1, 1, %g0 | |
1543 | bne 1f | |
1544 | add %l1, 0x4, %l2 /* npc = npc+4 */ | |
1545 | st %l1, [%sp + STACKFRAME_SZ + PT_PC] | |
1546 | b ret_trap_entry | |
1547 | st %l2, [%sp + STACKFRAME_SZ + PT_NPC] | |
1548 | ||
1549 | /* kernel knows what it is doing, fixup npc and continue */ | |
1550 | 1: | |
1551 | sub %l1, 1, %l1 | |
1552 | b ret_trap_entry | |
1553 | st %l1, [%sp + STACKFRAME_SZ + PT_NPC] | |
1554 | ||
#ifndef CONFIG_SUNOS_EMUL
	/* SunOS syscall entry when full emulation is compiled out:
	 * build a frame and let do_sunos_syscall handle/refuse it.
	 * NOTE(review): there is no explicit return path after the call
	 * here — control falls past the #endif; presumably intentional,
	 * confirm against the surrounding file before touching.
	 */
	.align	4
	.globl	sunos_syscall
sunos_syscall:
	SAVE_ALL_HEAD
	 rd	%wim, %l3
	wr	%l0, PSR_ET, %psr		! re-enable traps
	nop
	nop
	mov	%i0, %l5
	call	do_sunos_syscall
	 add	%sp, STACKFRAME_SZ, %o0		! arg1 = pt_regs ptr
#endif

1569 | /* {net, open}bsd system calls enter here... */ | |
1570 | .align 4 | |
1571 | .globl bsd_syscall | |
1572 | bsd_syscall: | |
1573 | /* Direct access to user regs, must faster. */ | |
1574 | cmp %g1, NR_SYSCALLS | |
1575 | blu,a 1f | |
1576 | sll %g1, 2, %l4 | |
1577 | ||
1578 | set sys_ni_syscall, %l7 | |
1579 | b bsd_is_too_hard | |
1580 | nop | |
1581 | ||
1582 | 1: | |
1583 | ld [%l7 + %l4], %l7 | |
1584 | ||
1585 | .globl bsd_is_too_hard | |
1586 | bsd_is_too_hard: | |
1587 | rd %wim, %l3 | |
1588 | SAVE_ALL | |
1589 | ||
1590 | wr %l0, PSR_ET, %psr | |
1591 | WRITE_PAUSE | |
1592 | ||
1593 | 2: | |
1594 | mov %i0, %o0 | |
1595 | mov %i1, %o1 | |
1596 | mov %i2, %o2 | |
1597 | mov %i0, %l5 | |
1598 | mov %i3, %o3 | |
1599 | mov %i4, %o4 | |
1600 | call %l7 | |
1601 | mov %i5, %o5 | |
1602 | ||
1603 | st %o0, [%sp + STACKFRAME_SZ + PT_I0] | |
1604 | set PSR_C, %g2 | |
1605 | cmp %o0, -ERESTART_RESTARTBLOCK | |
1606 | bgeu 1f | |
1607 | ld [%sp + STACKFRAME_SZ + PT_PSR], %g3 | |
1608 | ||
1609 | /* System call success, clear Carry condition code. */ | |
1610 | andn %g3, %g2, %g3 | |
1611 | clr %l6 | |
1612 | b 2f | |
1613 | st %g3, [%sp + STACKFRAME_SZ + PT_PSR] | |
1614 | ||
1615 | 1: | |
1616 | /* System call failure, set Carry condition code. | |
1617 | * Also, get abs(errno) to return to the process. | |
1618 | */ | |
1619 | sub %g0, %o0, %o0 | |
1620 | #if 0 /* XXX todo XXX */ | |
1621 | sethi %hi(bsd_xlatb_rorl), %o3 | |
1622 | or %o3, %lo(bsd_xlatb_rorl), %o3 | |
1623 | sll %o0, 2, %o0 | |
1624 | ld [%o3 + %o0], %o0 | |
1625 | #endif | |
1626 | mov 1, %l6 | |
1627 | st %o0, [%sp + STACKFRAME_SZ + PT_I0] | |
1628 | or %g3, %g2, %g3 | |
1629 | st %g3, [%sp + STACKFRAME_SZ + PT_PSR] | |
1630 | ||
1631 | /* Advance the pc and npc over the trap instruction. */ | |
1632 | 2: | |
1633 | ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */ | |
1634 | add %l1, 0x4, %l2 /* npc = npc+4 */ | |
1635 | st %l1, [%sp + STACKFRAME_SZ + PT_PC] | |
1636 | b ret_trap_entry | |
1637 | st %l2, [%sp + STACKFRAME_SZ + PT_NPC] | |
1638 | ||
1639 | /* Saving and restoring the FPU state is best done from lowlevel code. | |
1640 | * | |
1641 | * void fpsave(unsigned long *fpregs, unsigned long *fsr, | |
1642 | * void *fpqueue, unsigned long *fpqdepth) | |
1643 | */ | |
1644 | ||
1645 | .globl fpsave | |
1646 | fpsave: | |
1647 | st %fsr, [%o1] ! this can trap on us if fpu is in bogon state | |
1648 | ld [%o1], %g1 | |
1649 | set 0x2000, %g4 | |
1650 | andcc %g1, %g4, %g0 | |
1651 | be 2f | |
1652 | mov 0, %g2 | |
1653 | ||
1654 | /* We have an fpqueue to save. */ | |
1655 | 1: | |
1656 | std %fq, [%o2] | |
1657 | fpsave_magic: | |
1658 | st %fsr, [%o1] | |
1659 | ld [%o1], %g3 | |
1660 | andcc %g3, %g4, %g0 | |
1661 | add %g2, 1, %g2 | |
1662 | bne 1b | |
1663 | add %o2, 8, %o2 | |
1664 | ||
1665 | 2: | |
1666 | st %g2, [%o3] | |
1667 | ||
1668 | std %f0, [%o0 + 0x00] | |
1669 | std %f2, [%o0 + 0x08] | |
1670 | std %f4, [%o0 + 0x10] | |
1671 | std %f6, [%o0 + 0x18] | |
1672 | std %f8, [%o0 + 0x20] | |
1673 | std %f10, [%o0 + 0x28] | |
1674 | std %f12, [%o0 + 0x30] | |
1675 | std %f14, [%o0 + 0x38] | |
1676 | std %f16, [%o0 + 0x40] | |
1677 | std %f18, [%o0 + 0x48] | |
1678 | std %f20, [%o0 + 0x50] | |
1679 | std %f22, [%o0 + 0x58] | |
1680 | std %f24, [%o0 + 0x60] | |
1681 | std %f26, [%o0 + 0x68] | |
1682 | std %f28, [%o0 + 0x70] | |
1683 | retl | |
1684 | std %f30, [%o0 + 0x78] | |
1685 | ||
1686 | /* Thanks for Theo Deraadt and the authors of the Sprite/netbsd/openbsd | |
1687 | * code for pointing out this possible deadlock, while we save state | |
1688 | * above we could trap on the fsr store so our low level fpu trap | |
1689 | * code has to know how to deal with this. | |
1690 | */ | |
1691 | fpsave_catch: | |
1692 | b fpsave_magic + 4 | |
1693 | st %fsr, [%o1] | |
1694 | ||
1695 | fpsave_catch2: | |
1696 | b fpsave + 4 | |
1697 | st %fsr, [%o1] | |
1698 | ||
1699 | /* void fpload(unsigned long *fpregs, unsigned long *fsr); */ | |
1700 | ||
1701 | .globl fpload | |
1702 | fpload: | |
1703 | ldd [%o0 + 0x00], %f0 | |
1704 | ldd [%o0 + 0x08], %f2 | |
1705 | ldd [%o0 + 0x10], %f4 | |
1706 | ldd [%o0 + 0x18], %f6 | |
1707 | ldd [%o0 + 0x20], %f8 | |
1708 | ldd [%o0 + 0x28], %f10 | |
1709 | ldd [%o0 + 0x30], %f12 | |
1710 | ldd [%o0 + 0x38], %f14 | |
1711 | ldd [%o0 + 0x40], %f16 | |
1712 | ldd [%o0 + 0x48], %f18 | |
1713 | ldd [%o0 + 0x50], %f20 | |
1714 | ldd [%o0 + 0x58], %f22 | |
1715 | ldd [%o0 + 0x60], %f24 | |
1716 | ldd [%o0 + 0x68], %f26 | |
1717 | ldd [%o0 + 0x70], %f28 | |
1718 | ldd [%o0 + 0x78], %f30 | |
1719 | ld [%o1], %fsr | |
1720 | retl | |
1721 | nop | |
1722 | ||
1723 | /* __ndelay and __udelay take two arguments: | |
1724 | * 0 - nsecs or usecs to delay | |
1725 | * 1 - per_cpu udelay_val (loops per jiffy) | |
1726 | * | |
1727 | * Note that ndelay gives HZ times higher resolution but has a 10ms | |
1728 | * limit. udelay can handle up to 1s. | |
1729 | */ | |
1730 | .globl __ndelay | |
1731 | __ndelay: | |
1732 | save %sp, -STACKFRAME_SZ, %sp | |
1733 | mov %i0, %o0 | |
196bffa5 MF |
1734 | call .umul ! round multiplier up so large ns ok |
1735 | mov 0x1ae, %o1 ! 2**32 / (1 000 000 000 / HZ) | |
1da177e4 LT |
1736 | call .umul |
1737 | mov %i1, %o1 ! udelay_val | |
1738 | ba delay_continue | |
1739 | mov %o1, %o0 ! >>32 later for better resolution | |
1740 | ||
1741 | .globl __udelay | |
1742 | __udelay: | |
1743 | save %sp, -STACKFRAME_SZ, %sp | |
1744 | mov %i0, %o0 | |
196bffa5 | 1745 | sethi %hi(0x10c7), %o1 ! round multiplier up so large us ok |
1da177e4 | 1746 | call .umul |
196bffa5 | 1747 | or %o1, %lo(0x10c7), %o1 ! 2**32 / 1 000 000 |
1da177e4 LT |
1748 | call .umul |
1749 | mov %i1, %o1 ! udelay_val | |
196bffa5 MF |
1750 | sethi %hi(0x028f4b62), %l0 ! Add in rounding constant * 2**32, |
1751 | or %g0, %lo(0x028f4b62), %l0 | |
1752 | addcc %o0, %l0, %o0 ! 2**32 * 0.009 999 | |
1753 | bcs,a 3f | |
1754 | add %o1, 0x01, %o1 | |
1755 | 3: | |
1da177e4 LT |
1756 | call .umul |
1757 | mov HZ, %o0 ! >>32 earlier for wider range | |
1758 | ||
1759 | delay_continue: | |
1760 | cmp %o0, 0x0 | |
1761 | 1: | |
1762 | bne 1b | |
1763 | subcc %o0, 1, %o0 | |
1764 | ||
1765 | ret | |
1766 | restore | |
1767 | ||
1768 | /* Handle a software breakpoint */ | |
1769 | /* We have to inform parent that child has stopped */ | |
1770 | .align 4 | |
1771 | .globl breakpoint_trap | |
1772 | breakpoint_trap: | |
1773 | rd %wim,%l3 | |
1774 | SAVE_ALL | |
1775 | wr %l0, PSR_ET, %psr | |
1776 | WRITE_PAUSE | |
1777 | ||
1778 | st %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls | |
1779 | call sparc_breakpoint | |
1780 | add %sp, STACKFRAME_SZ, %o0 | |
1781 | ||
1782 | RESTORE_ALL | |
1783 | ||
1784 | .align 4 | |
1785 | .globl __handle_exception, flush_patch_exception | |
1786 | __handle_exception: | |
1787 | flush_patch_exception: | |
1788 | FLUSH_ALL_KERNEL_WINDOWS; | |
1789 | ldd [%o0], %o6 | |
1790 | jmpl %o7 + 0xc, %g0 ! see asm-sparc/processor.h | |
1791 | mov 1, %g1 ! signal EFAULT condition | |
1792 | ||
1793 | .align 4 | |
1794 | .globl kill_user_windows, kuw_patch1_7win | |
1795 | .globl kuw_patch1 | |
1796 | kuw_patch1_7win: sll %o3, 6, %o3 | |
1797 | ||
1798 | /* No matter how much overhead this routine has in the worst | |
1799 | * case scenerio, it is several times better than taking the | |
1800 | * traps with the old method of just doing flush_user_windows(). | |
1801 | */ | |
1802 | kill_user_windows: | |
1803 | ld [%g6 + TI_UWINMASK], %o0 ! get current umask | |
1804 | orcc %g0, %o0, %g0 ! if no bits set, we are done | |
1805 | be 3f ! nothing to do | |
1806 | rd %psr, %o5 ! must clear interrupts | |
1807 | or %o5, PSR_PIL, %o4 ! or else that could change | |
1808 | wr %o4, 0x0, %psr ! the uwinmask state | |
1809 | WRITE_PAUSE ! burn them cycles | |
1810 | 1: | |
1811 | ld [%g6 + TI_UWINMASK], %o0 ! get consistent state | |
1812 | orcc %g0, %o0, %g0 ! did an interrupt come in? | |
1813 | be 4f ! yep, we are done | |
1814 | rd %wim, %o3 ! get current wim | |
1815 | srl %o3, 1, %o4 ! simulate a save | |
1816 | kuw_patch1: | |
1817 | sll %o3, 7, %o3 ! compute next wim | |
1818 | or %o4, %o3, %o3 ! result | |
1819 | andncc %o0, %o3, %o0 ! clean this bit in umask | |
1820 | bne kuw_patch1 ! not done yet | |
1821 | srl %o3, 1, %o4 ! begin another save simulation | |
1822 | wr %o3, 0x0, %wim ! set the new wim | |
1823 | st %g0, [%g6 + TI_UWINMASK] ! clear uwinmask | |
1824 | 4: | |
1825 | wr %o5, 0x0, %psr ! re-enable interrupts | |
1826 | WRITE_PAUSE ! burn baby burn | |
1827 | 3: | |
1828 | retl ! return | |
1829 | st %g0, [%g6 + TI_W_SAVED] ! no windows saved | |
1830 | ||
1831 | .align 4 | |
1832 | .globl restore_current | |
1833 | restore_current: | |
1834 | LOAD_CURRENT(g6, o0) | |
1835 | retl | |
1836 | nop | |
1837 | ||
#ifdef CONFIG_PCI
#include <asm/pcic.h>

	.align	4
	.globl	linux_trap_ipi15_pcic
linux_trap_ipi15_pcic:
	rd	%wim, %l3
	SAVE_ALL

	/*
	 * First deactivate NMI
	 * or we cannot drop ET, cannot get window spill traps.
	 * The busy loop is necessary because the PIO error
	 * sometimes does not go away quickly and we trap again.
	 */
	sethi	%hi(pcic_regs), %o1
	ld	[%o1 + %lo(pcic_regs)], %o2

	! Get pending status for printouts later.
	ld	[%o2 + PCI_SYS_INT_PENDING], %o0

	mov	PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
	stb	%o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
1:
	ld	[%o2 + PCI_SYS_INT_PENDING], %o1
	andcc	%o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
	bne	1b				! spin until the error clears
	 nop

	or	%l0, PSR_PIL, %l4		! mask interrupts, then
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	call	pcic_nmi
	 add	%sp, STACKFRAME_SZ, %o1		! struct pt_regs *regs
	RESTORE_ALL

	/* Four-byte patch sequence installed over the level-15 trap
	 * table entry; the trailing .word 0 pads the slot.
	 */
	.globl	pcic_nmi_trap_patch
pcic_nmi_trap_patch:
	sethi	%hi(linux_trap_ipi15_pcic), %l3
	jmpl	%l3 + %lo(linux_trap_ipi15_pcic), %g0
	 rd	%psr, %l0
	.word	0

#endif /* CONFIG_PCI */
1885 | ||
1886 | /* End of entry.S */ |