sh: interrupt exception handling rework
arch/sh/kernel/entry.S
/*
 *  linux/arch/sh/entry.S
 *
 *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 *  Copyright (C) 2003 - 2006  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpu/mmu_context.h>
#include <asm/unistd.h>

! NOTE:
! GNU as (as of 2.9.1) relaxes bf/s into a bt/s + bra pair when the branch
! target is too far away, but that places a branch in a delay slot and
! raises an illegal slot exception.
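!
! For illustration (a minimal sketch, not actual assembler output): on SH,
! a branch instruction may not sit in a delay slot, so a relaxation of the
! shape
!
!	bt/s	1f
!	 bra	far_label	! branch in a delay slot => illegal
!				! slot exception
! 1:
!
! faults as soon as the delay slot executes.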

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * NOTE: This code uses a convention that instructions in the delay slot
 * of a transfer-control instruction are indented by an extra space, thus:
 *
 *	jmp	@k0		! control-transfer instruction
 *	 ldc	k1, ssr		! delay slot
 *
 * Stack layout in 'ret_from_syscall':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in ptrace.c and ptrace.h
 *
 *	r0
 *	...
 *	r15 = stack pointer
 *	spc
 *	pr
 *	ssr
 *	gbr
 *	mach
 *	macl
 *	syscall #
 *
 */
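
/*
 * As a reading aid only, the layout above corresponds to a frame of the
 * following shape (a sketch; the real struct pt_regs in asm/ptrace.h is
 * authoritative, per the note above about keeping them in sync):
 *
 *	struct pt_regs_sketch {
 *		unsigned long regs[16];	  regs[15] is the stack pointer
 *		unsigned long pc;	  saved from SPC
 *		unsigned long pr;
 *		unsigned long sr;	  saved from SSR
 *		unsigned long gbr;
 *		unsigned long mach;
 *		unsigned long macl;
 *		long tra;		  syscall # (TRA)
 *	};
 */
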
#if defined(CONFIG_KGDB_NMI)
NMI_VEC = 0x1c0			! Must catch early for debounce
#endif

/* Offsets to the stack */
OFF_R0	=  0		/* Return value. New ABI also arg4 */
OFF_R1	=  4		/* New ABI: arg5 */
OFF_R2	=  8		/* New ABI: arg6 */
OFF_R3	=  12		/* New ABI: syscall_nr */
OFF_R4	=  16		/* New ABI: arg0 */
OFF_R5	=  20		/* New ABI: arg1 */
OFF_R6	=  24		/* New ABI: arg2 */
OFF_R7	=  28		/* New ABI: arg3 */
OFF_SP	=  (15*4)
OFF_PC	=  (16*4)
OFF_SR	=  (16*4+8)
OFF_TRA	=  (16*4+6*4)
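
/*
 * Worked out against the stack layout listed above (word = 4 bytes):
 * r0..r15 occupy offsets 0..60, so OFF_SP = 15*4 = 60 and the saved PC
 * (spc) follows at OFF_PC = 16*4 = 64.  pr sits at 68, hence
 * OFF_SR = 16*4+8 = 72 points at the saved ssr, and after gbr, mach and
 * macl the syscall # lands at OFF_TRA = 16*4+6*4 = 88.
 */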


#define k0	r0
#define k1	r1
#define k2	r2
#define k3	r3
#define k4	r4

#define g_imask		r6	/* r6_bank1 */
#define k_g_imask	r6_bank	/* r6_bank1 */
#define current		r7	/* r7_bank1 */

/*
 * Kernel mode register usage:
 *	k0	scratch
 *	k1	scratch
 *	k2	scratch (Exception code)
 *	k3	scratch (Return address)
 *	k4	scratch
 *	k5	reserved
 *	k6	Global Interrupt Mask (0--15 << 4)
 *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
 */

!
! TLB Miss / Initial Page write exception handling
!			_and_
! TLB hits, but the access violates the protection.
! It may still be a valid access, such as stack growth and/or
! copy-on-write (C-O-W).
!
!
! Find the pmd/pte entry and load the TLB; if the entry is not found,
! raise an address error (SEGV).
!
! Although this could be written in assembly language (and it'd be
! faster), this first version relies *heavily* on the C implementation.
!
#define CLI()				\
	stc	sr, r0;			\
	or	#0xf0, r0;		\
	ldc	r0, sr

#define STI()				\
	mov.l	__INV_IMASK, r11;	\
	stc	sr, r10;		\
	and	r11, r10;		\
	stc	k_g_imask, r11;		\
	or	r11, r10;		\
	ldc	r10, sr

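/*
 * A worked example of the masks above (annotation, not original text):
 * SR.IMASK is bits 7..4, so CLI()'s "or #0xf0" raises IMASK to 15 and
 * blocks all interrupts, while STI() clears those bits with
 * __INV_IMASK == 0xffffff0f and then ORs back the global mask kept in
 * k_g_imask, e.g.
 *
 *	SR = ....00f0, g_imask = 0x00000050
 *	 -> (SR & 0xffffff0f) | 0x50 == ....0050: IMASK lowered to 5
 */
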
#if defined(CONFIG_PREEMPT)
#  define preempt_stop()	CLI()
#else
#  define preempt_stop()
#  define resume_kernel		restore_all
#endif

#if defined(CONFIG_MMU)
	.align	2
ENTRY(tlb_miss_load)
	bra	call_dpf
	 mov	#0, r5

	.align	2
ENTRY(tlb_miss_store)
	bra	call_dpf
	 mov	#1, r5

	.align	2
ENTRY(initial_page_write)
	bra	call_dpf
	 mov	#1, r5

	.align	2
ENTRY(tlb_protection_violation_load)
	bra	call_dpf
	 mov	#0, r5

	.align	2
ENTRY(tlb_protection_violation_store)
	bra	call_dpf
	 mov	#1, r5

call_dpf:
	mov.l	1f, r0
	mov	r5, r8
	mov.l	@r0, r6
	mov	r6, r9
	mov.l	2f, r0
	sts	pr, r10
	jsr	@r0
	 mov	r15, r4
	!
	tst	r0, r0
	bf/s	0f
	 lds	r10, pr
	rts
	 nop
0:	STI()
	mov.l	3f, r0
	mov	r9, r6
	mov	r8, r5
	jmp	@r0
	 mov	r15, r4

	.align 2
1:	.long	MMU_TEA
2:	.long	__do_page_fault
3:	.long	do_page_fault

	.align	2
ENTRY(address_error_load)
	bra	call_dae
	 mov	#0,r5		! writeaccess = 0

	.align	2
ENTRY(address_error_store)
	bra	call_dae
	 mov	#1,r5		! writeaccess = 1

	.align	2
call_dae:
	mov.l	1f, r0
	mov.l	@r0, r6		! address
	mov.l	2f, r0
	jmp	@r0
	 mov	r15, r4		! regs

	.align 2
1:	.long	MMU_TEA
2:	.long	do_address_error
#endif /* CONFIG_MMU */

#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present.
! If both are configured, handle the debug traps (breakpoints) in SW,
! but still allow BIOS traps to FW.
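! (A worked case: a BIOS call issued as "trapa #0x3f" records
! TRA = 0x3f << 2 = 0xfc; debug_trap leaves the raw TRA value in r8, and
! the shlr2 below recovers 0x3f for the comparison.)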

	.align	2
debug_kernel:
#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB)
	/* Force BIOS call to FW (debug_trap put TRA in r8) */
	mov	r8,r0
	shlr2	r0
	cmp/eq	#0x3f,r0
	bt	debug_kernel_fw
#endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */

debug_enter:
#if defined(CONFIG_SH_KGDB)
	/* Jump to kgdb, pass stacked regs as arg */
debug_kernel_sw:
	mov.l	3f, r0
	jmp	@r0
	 mov	r15, r4
	.align	2
3:	.long	kgdb_handle_exception
#endif /* CONFIG_SH_KGDB */

#if defined(CONFIG_SH_STANDARD_BIOS)
	/* Unwind the stack and jmp to the debug entry */
debug_kernel_fw:
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	stc	sr, r8
	mov.l	1f, r9			! BL =1, RB=1, IMASK=0x0F
	or	r9, r8
	ldc	r8, sr			! here, change the register bank
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k0
	ldc.l	@r15+, spc
	lds.l	@r15+, pr
	mov.l	@r15+, k1
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	mov	k0, r15
	!
	mov.l	2f, k0
	mov.l	@k0, k0
	jmp	@k0
	 ldc	k1, ssr
	.align	2
1:	.long	0x300000f0
2:	.long	gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */

#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */


	.align	2
debug_trap:
#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
	mov	#OFF_SR, r0
	mov.l	@(r0,r15), r0		! get status register
	shll	r0
	shll	r0			! kernel space?
	bt/s	debug_kernel
#endif
	 mov.l	@r15, r0		! Restore R0 value
	mov.l	1f, r8
	jmp	@r8
	 nop

	.align	2
ENTRY(exception_error)
	!
	STI()
	mov.l	2f, r0
	jmp	@r0
	 nop

	!
	.align	2
1:	.long	break_point_trap_software
2:	.long	do_exception_error

	.align	2
ret_from_exception:
	preempt_stop()
ENTRY(ret_from_irq)
	!
	mov	#OFF_SR, r0
	mov.l	@(r0,r15), r0	! get status register
	shll	r0
	shll	r0		! kernel space?
	bt/s	resume_kernel	! Yes, it's from kernel, go back soon
	 GET_THREAD_INFO(r8)

#ifdef CONFIG_PREEMPT
	bra	resume_userspace
	 nop
ENTRY(resume_kernel)
	mov.l	@(TI_PRE_COUNT,r8), r0	! current_thread_info->preempt_count
	tst	r0, r0
	bf	noresched
need_resched:
	mov.l	@(TI_FLAGS,r8), r0	! current_thread_info->flags
	tst	#_TIF_NEED_RESCHED, r0	! need_resched set?
	bt	noresched

	mov	#OFF_SR, r0
	mov.l	@(r0,r15), r0		! get status register
	and	#0xf0, r0		! interrupts off (exception path)?
	cmp/eq	#0xf0, r0
	bt	noresched

	mov.l	1f, r0
	mov.l	r0, @(TI_PRE_COUNT,r8)

	STI()
	mov.l	2f, r0
	jsr	@r0
	 nop
	mov	#0, r0
	mov.l	r0, @(TI_PRE_COUNT,r8)
	CLI()

	bra	need_resched
	 nop
noresched:
	bra	restore_all
	 nop

	.align 2
1:	.long	PREEMPT_ACTIVE
2:	.long	schedule
#endif
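
/*
 * In C terms the CONFIG_PREEMPT block above behaves roughly like this
 * (a sketch of the control flow only, not real kernel code):
 *
 *	if (ti->preempt_count)
 *		goto restore_all;
 *	while (ti->flags & _TIF_NEED_RESCHED) {
 *		if ((regs->sr & 0xf0) == 0xf0)	// irqs were off: bail out
 *			break;
 *		ti->preempt_count = PREEMPT_ACTIVE;
 *		local_irq_enable();
 *		schedule();
 *		local_irq_disable();
 *		ti->preempt_count = 0;
 *	}
 *	goto restore_all;
 */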

ENTRY(resume_userspace)
	! r8: current_thread_info
	CLI()
	mov.l	@(TI_FLAGS,r8), r0	! current_thread_info->flags
	tst	#_TIF_WORK_MASK, r0
	bt/s	restore_all
	 tst	#_TIF_NEED_RESCHED, r0

	.align	2
work_pending:
	! r0: current_thread_info->flags
	! r8: current_thread_info
	! t:  result of "tst	#_TIF_NEED_RESCHED, r0"
	bf/s	work_resched
	 tst	#(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0
work_notifysig:
	bt/s	restore_all
	 mov	r15, r4
	mov	r12, r5		! set arg1(save_r0)
	mov	r0, r6
	mov.l	2f, r1
	mova	restore_all, r0
	jmp	@r1
	 lds	r0, pr
work_resched:
#ifndef CONFIG_PREEMPT
	! gUSA handling
	mov.l	@(OFF_SP,r15), r0	! get user space stack pointer
	mov	r0, r1
	shll	r0
	bf/s	1f
	 shll	r0
	bf/s	1f
	 mov	#OFF_PC, r0
	! SP >= 0xc0000000 : gUSA mark
	mov.l	@(r0,r15), r2		! get user space PC (program counter)
	mov.l	@(OFF_R0,r15), r3	! end point
	cmp/hs	r3, r2			! r2 >= r3?
	bt	1f
	add	r3, r1			! rewind point #2
	mov.l	r1, @(r0,r15)		! reset PC to rewind point #2
	!
1:
#endif
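	! In C terms the rollback above is roughly (a sketch; "sp" is the
	! saved user r15, "r0" the saved user r0 acting as the end point):
	!
	!	if (sp >= 0xc0000000 && pc < r0)
	!		pc = r0 + sp;	/* sp holds a small negative   */
	!				/* count, so this resets PC to */
	!				/* the gUSA rewind point       */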
	mov.l	1f, r1
	jsr	@r1				! schedule
	 nop
	CLI()
	!
	mov.l	@(TI_FLAGS,r8), r0	! current_thread_info->flags
	tst	#_TIF_WORK_MASK, r0
	bt	restore_all
	bra	work_pending
	 tst	#_TIF_NEED_RESCHED, r0

	.align	2
1:	.long	schedule
2:	.long	do_notify_resume

	.align	2
syscall_exit_work:
	! r0: current_thread_info->flags
	! r8: current_thread_info
	tst	#_TIF_SYSCALL_TRACE, r0
	bt/s	work_pending
	 tst	#_TIF_NEED_RESCHED, r0
	STI()
	! XXX setup arguments...
	mov.l	4f, r0			! do_syscall_trace
	jsr	@r0
	 nop
	bra	resume_userspace
	 nop

	.align	2
syscall_trace_entry:
	! Yes, it is traced.
	! XXX setup arguments...
	mov.l	4f, r11		! Call do_syscall_trace, which notifies
	jsr	@r11		! the tracing parent (may clobber R0-R7)
	 nop
	! Reload R0-R4 from kernel stack, where the
	! parent may have modified them using
	! ptrace(POKEUSR).  (Note that R0-R2 are
	! used by the system call handler directly
	! from the kernel stack anyway, so they don't
	! need to be reloaded here.)  This allows the
	! parent to rewrite system calls and args on
	! the fly.
	mov.l	@(OFF_R4,r15), r4	! arg0
	mov.l	@(OFF_R5,r15), r5
	mov.l	@(OFF_R6,r15), r6
	mov.l	@(OFF_R7,r15), r7	! arg3
	mov.l	@(OFF_R3,r15), r3	! syscall_nr
	! Arrange for do_syscall_trace to be called
	! again as the system call returns.
	mov.l	2f, r10			! Number of syscalls
	cmp/hs	r10, r3
	bf	syscall_call
	mov	#-ENOSYS, r0
	bra	syscall_exit
	 mov.l	r0, @(OFF_R0,r15)	! Return value
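
/*
 * For context, the "parent may have modified them" case looks like this
 * from userspace (an illustrative tracer fragment only; the register
 * offsets are assumptions here -- they follow the pt_regs layout shown
 * at the top of this file, putting r3, the syscall number, at 4*3 and
 * r4, arg0, at 4*4):
 *
 *	#include <sys/ptrace.h>
 *
 *	// At a syscall-entry stop, rewrite the child's syscall number
 *	// and first argument in its register frame, then resume it.
 *	ptrace(PTRACE_POKEUSER, pid, (void *)(4 * 3), (void *)new_nr);
 *	ptrace(PTRACE_POKEUSER, pid, (void *)(4 * 4), (void *)new_arg0);
 *	ptrace(PTRACE_SYSCALL, pid, 0, 0);
 */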

/*
 * Syscall interface:
 *
 *	Syscall #: R3
 *	Arguments #0 to #3: R4--R7
 *	Arguments #4 to #6: R0, R1, R2
 *	TRA: (number of arguments + 0x10) x 4
 *
 * This code also handles delegating other traps to the BIOS/gdb stub
 * according to:
 *
 * Trap number
 * (TRA>>2)	Purpose
 * --------	-------
 * 0x0-0xf	old syscall ABI
 * 0x10-0x1f	new syscall ABI
 * 0x20-0xff	delegated through debug_trap to BIOS/gdb stub.
 *
 * Note: When we're first called, the TRA value must be shifted
 * right 2 bits in order to get the value that was used as the "trapa"
 * argument.
 */
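
/*
 * A worked example of the encoding (an illustrative userspace fragment,
 * not part of this file): a 3-argument call such as write(2) under the
 * new ABI is issued as
 *
 *	mov	#__NR_write, r3	! syscall number
 *	...			! r4-r6 = fd/buf/count
 *	trapa	#0x13		! 3 arguments + 0x10
 *
 * which records TRA = 0x13 << 2 = 0x4c.  system_call below compares the
 * raw TRA against 0x7f, so anything issued with a trapa argument of
 * 0x20 or more (TRA >= 0x80) is routed to debug_trap instead.
 */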

	.align	2
	.globl	ret_from_fork
ret_from_fork:
	mov.l	1f, r8
	jsr	@r8
	 mov	r0, r4
	bra	syscall_exit
	 nop
	.align	2
1:	.long	schedule_tail
	!
ENTRY(system_call)
	mov.l	1f, r9
	mov.l	@r9, r8		! Read from TRA (Trap Address) Register
	!
	! Is the trap argument >= 0x20? (TRA will be >= 0x80)
	mov	#0x7f, r9
	cmp/hi	r9, r8
	bt/s	0f
	 mov	#OFF_TRA, r9
	add	r15, r9
	!
	mov.l	r8, @r9		! set TRA value to tra
	STI()
	! Call the system call handler through the table.
	! First check for bad syscall number
	mov	r3, r9
	mov.l	2f, r8		! Number of syscalls
	cmp/hs	r8, r9
	bf/s	good_system_call
	 GET_THREAD_INFO(r8)
syscall_badsys:			! Bad syscall number
	mov	#-ENOSYS, r0
	bra	resume_userspace
	 mov.l	r0, @(OFF_R0,r15)	! Return value
	!
0:
	bra	debug_trap
	 nop
	!
good_system_call:		! Good syscall number
	mov.l	@(TI_FLAGS,r8), r8
	mov	#_TIF_SYSCALL_TRACE, r10
	tst	r10, r8
	bf	syscall_trace_entry
	!
syscall_call:
	shll2	r9		! x4
	mov.l	3f, r8		! Load the address of sys_call_table
	add	r8, r9
	mov.l	@r9, r8
	jsr	@r8		! jump to specific syscall handler
	 nop
	mov.l	@(OFF_R0,r15), r12	! save r0
	mov.l	r0, @(OFF_R0,r15)	! save the return value
	!
syscall_exit:
	CLI()
	!
	GET_THREAD_INFO(r8)
	mov.l	@(TI_FLAGS,r8), r0	! current_thread_info->flags
	tst	#_TIF_ALLWORK_MASK, r0
	bf	syscall_exit_work
restore_all:
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	!
	stc	sr, r8
	mov.l	7f, r9
	or	r9, r8			! BL =1, RB=1
	ldc	r8, sr			! here, change the register bank
	!
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k4		! original stack pointer
	ldc.l	@r15+, spc
	lds.l	@r15+, pr
	mov.l	@r15+, k3		! original SR
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	add	#4, r15			! Skip syscall number
	!
#ifdef CONFIG_SH_DSP
	mov.l	@r15+, k0		! DSP mode marker
	mov.l	5f, k1
	cmp/eq	k0, k1			! Do we have a DSP stack frame?
	bf	skip_restore

	stc	sr, k0			! Enable CPU DSP mode
	or	k1, k0			! (within kernel it may be disabled)
	ldc	k0, sr
	mov	r2, k0			! Backup r2

	! Restore DSP registers from stack
	mov	r15, r2
	movs.l	@r2+, a1
	movs.l	@r2+, a0g
	movs.l	@r2+, a1g
	movs.l	@r2+, m0
	movs.l	@r2+, m1
	mov	r2, r15

	lds.l	@r15+, a0
	lds.l	@r15+, x0
	lds.l	@r15+, x1
	lds.l	@r15+, y0
	lds.l	@r15+, y1
	lds.l	@r15+, dsr
	ldc.l	@r15+, rs
	ldc.l	@r15+, re
	ldc.l	@r15+, mod

	mov	k0, r2			! Restore r2
skip_restore:
#endif
	!
	! Calculate new SR value
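	! (What follows restores the IMASK field of the saved SR: if the
	! interrupted context had all interrupts masked -- the four IMASK
	! bits extracted below equal 0xf -- that mask is kept as-is;
	! otherwise the global interrupt mask held in g_imask is used.
	! E.g. saved SR = ....00f0: (SR >> 2) & 0x3c == 0x3c, so 0xf0 is
	! kept; saved SR = ....0050 takes the g_imask branch instead.)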
	mov	k3, k2			! original SR value
	mov.l	9f, k1
	and	k1, k2			! Mask original SR value
	!
	mov	k3, k0			! Calculate IMASK-bits
	shlr2	k0
	and	#0x3c, k0
	cmp/eq	#0x3c, k0
	bt/s	6f
	 shll2	k0
	mov	g_imask, k0
	!
6:	or	k0, k2			! Set the IMASK-bits
	ldc	k2, ssr
	!
#if defined(CONFIG_KGDB_NMI)
	! Clear in_nmi
	mov.l	6f, k0
	mov	#0, k1
	mov.b	k1, @k0
#endif
	mov.l	@r15+, k2		! restore EXPEVT
	mov	k4, r15
	rte
	 nop

	.align	2
1:	.long	TRA
2:	.long	NR_syscalls
3:	.long	sys_call_table
4:	.long	do_syscall_trace
5:	.long	0x00001000	! DSP
7:	.long	0x30000000
9:
__INV_IMASK:
	.long	0xffffff0f	! ~(IMASK)

! Exception Vector Base
!
!	Should be aligned on a page boundary.
!
	.balign	4096,0,4096
ENTRY(vbr_base)
	.long	0
!
	.balign	256,0,256
general_exception:
	mov.l	1f, k2
	mov.l	2f, k3
	bra	handle_exception
	 mov.l	@k2, k2
	.align	2
1:	.long	EXPEVT
2:	.long	ret_from_exception
!
!
	.balign	1024,0,1024
tlb_miss:
	mov.l	1f, k2
	mov.l	4f, k3
	bra	handle_exception
	 mov.l	@k2, k2
!
	.balign	512,0,512
interrupt:
	mov.l	2f, k2
	mov.l	3f, k3
#if defined(CONFIG_KGDB_NMI)
	! Debounce (filter nested NMI)
	mov.l	@k2, k0
	mov.l	5f, k1
	cmp/eq	k1, k0
	bf	0f
	mov.l	6f, k1
	tas.b	@k1
	bt	0f
	rte
	 nop
	.align	2
5:	.long	NMI_VEC
6:	.long	in_nmi
0:
#endif /* defined(CONFIG_KGDB_NMI) */
	bra	handle_exception
	 mov	#-1, k2		! interrupt exception marker

	.align	2
1:	.long	EXPEVT
2:	.long	INTEVT
3:	.long	ret_from_irq
4:	.long	ret_from_exception

!
!
	.align	2
ENTRY(handle_exception)
	! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
	! save all registers onto stack.
	!
	stc	ssr, k0		! Is it from kernel space?
	shll	k0		! Check MD bit (bit30) by shifting it into...
	shll	k0		!       ...the T bit
	bt/s	1f		! It's a kernel to kernel transition.
	 mov	r15, k0		! save original stack to k0
	/* User space to kernel */
	mov	#(THREAD_SIZE >> 8), k1
	shll8	k1		! k1 := THREAD_SIZE
	add	current, k1
	mov	k1, r15		! change to kernel stack
	!
1:	mov.l	2f, k1
	!
#ifdef CONFIG_SH_DSP
	mov.l	r2, @-r15	! Save r2, we need another reg
	stc	sr, k4
	mov.l	1f, r2
	tst	r2, k4		! Check if in DSP mode
	mov.l	@r15+, r2	! Restore r2 now
	bt/s	skip_save
	 mov	#0, k4		! Set marker for no stack frame

	mov	r2, k4		! Backup r2 (in k4) for later

	! Save DSP registers on stack
	stc.l	mod, @-r15
	stc.l	re, @-r15
	stc.l	rs, @-r15
	sts.l	dsr, @-r15
	sts.l	y1, @-r15
	sts.l	y0, @-r15
	sts.l	x1, @-r15
	sts.l	x0, @-r15
	sts.l	a0, @-r15

	! GAS is broken; it does not generate a correct "movs.l Ds,@-As"
	! instruction.

	! FIXME: Make sure that this is still the case with newer toolchains,
	! as we're not at all interested in supporting ancient toolchains at
	! this point. -- PFM.

	mov	r15, r2
	.word	0xf653		! movs.l	a1, @-r2
	.word	0xf6f3		! movs.l	a0g, @-r2
	.word	0xf6d3		! movs.l	a1g, @-r2
	.word	0xf6c3		! movs.l	m0, @-r2
	.word	0xf6e3		! movs.l	m1, @-r2
	mov	r2, r15

	mov	k4, r2		! Restore r2
	mov.l	1f, k4		! Force DSP stack frame
skip_save:
	mov.l	k4, @-r15	! Push DSP mode marker onto stack
#endif
	! Save the user registers on the stack.
	mov.l	k2, @-r15	! EXPEVT

	mov	#-1, k4
	mov.l	k4, @-r15	! set TRA (default: -1)
	!
	sts.l	macl, @-r15
	sts.l	mach, @-r15
	stc.l	gbr, @-r15
	stc.l	ssr, @-r15
	sts.l	pr, @-r15
	stc.l	spc, @-r15
	!
	lds	k3, pr		! Set the return address to pr
	!
	mov.l	k0, @-r15	! save original stack
	mov.l	r14, @-r15
	mov.l	r13, @-r15
	mov.l	r12, @-r15
	mov.l	r11, @-r15
	mov.l	r10, @-r15
	mov.l	r9, @-r15
	mov.l	r8, @-r15
	!
	stc	sr, r8		! Back to normal register bank, and
	or	k1, r8		! Block all interrupts
	mov.l	3f, k1
	and	k1, r8		! ...
	ldc	r8, sr		! ...changed here.
	!
	mov.l	r7, @-r15
	mov.l	r6, @-r15
	mov.l	r5, @-r15
	mov.l	r4, @-r15
	mov.l	r3, @-r15
	mov.l	r2, @-r15
	mov.l	r1, @-r15
	mov.l	r0, @-r15

	/*
	 * This gets a bit tricky.. in the INTEVT case we don't want to use
	 * the VBR offset as a destination in the jump call table, since all
	 * of the destinations are the same. In this case, (interrupt) sets
	 * a marker in r2 (now r2_bank since SR.RB changed), which we check
	 * to determine the exception type. For all other exceptions, we
	 * forcibly read EXPEVT from memory and fix up the jump address; in
	 * the interrupt exception case we jump to do_IRQ() and defer the
	 * INTEVT read until there. As a bonus, we can also clean up the
	 * SR.RB checks that do_IRQ() was doing.
	 */
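	/*
	 * In other words (a sketch of the dispatch below; annotation, not
	 * original text):
	 *
	 *	if (r2_bank == -1)	// marker set at 'interrupt' above
	 *		do_IRQ();	// reads INTEVT itself
	 *	else
	 *		// EXPEVT codes step by 0x20; >>3 turns the code
	 *		// into a byte offset into a table of 4-byte
	 *		// pointers, i.e. index = EXPEVT / 0x20
	 *		exception_handling_table[r2_bank >> 5]();
	 */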
	stc	r2_bank, r8
	cmp/pz	r8
	bf	interrupt_exception
	shlr2	r8
	shlr	r8
	mov.l	4f, r9
	add	r8, r9
	mov.l	@r9, r9
	jmp	@r9
	 nop
	rts
	 nop

	.align	2
1:	.long	0x00001000	! DSP=1
2:	.long	0x000080f0	! FD=1, IMASK=15
3:	.long	0xcfffffff	! RB=0, BL=0
4:	.long	exception_handling_table

interrupt_exception:
	mov.l	1f, r9
	jmp	@r9
	 nop
	rts
	 nop

	.align 2
1:	.long	do_IRQ

	.align	2
ENTRY(exception_none)
	rts
	 nop