/*
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
/* Fixed extraction artifact: stray source line numbers fused onto each line. */
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <linux/errno.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/percpu.h>
#include <asm/signal.h>
/*
 * MSR handling fragment (excerpt).
 * On cores built with the MSR set/clear instructions, interrupt-enable and
 * break-in-progress bits can be toggled directly; otherwise the MSR is
 * read-modify-written through r11 as below.
 * NOTE(review): the macro bodies, the #else branch and the matching #endif
 * are elided in this excerpt — the two andi lines below may belong to the
 * non-MSR-instruction path; confirm against the full file.
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	andi	r11, r11, ~MSR_IE	/* clear interrupt-enable in the copied MSR */
	andi	r11, r11, ~MSR_BIP	/* clear break-in-progress in the copied MSR */
/*
 * Hardware interrupt entry/exit (excerpt of the _interrupt handler).
 * Saves the entry stack pointer, switches to the kernel stack if the trap
 * came from user mode, dispatches via ret_from_intr, and checks
 * thread_info flags for reschedule/signal work on the way out.
 * NOTE(review): this excerpt is missing lines — the ENTRY label, the
 * kernel-mode test (beqi …, 1f), the register save/restore runs, and the
 * "2:" / no_intr_reshed labels are elided. Do not assemble as-is.
 */
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
	/* NOTE(review): a "beqi r11, 1f" kernel-mode test is elided here */
	brid	2f				/* jump over */
	addik	r1, r1, (-PT_SIZE)		/* room for pt_regs (delay slot) */
1:	/* switch to kernel stack */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	/* calculate kernel stack pointer */
	addik	r1, r1, THREAD_SIZE - PT_SIZE
	/* NOTE(review): "2:" label elided here */
	swi	r11, r1, PT_MODE		/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	/* special purpose registers */
	/* NOTE(review): register save run elided in this excerpt */
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	/* update mode indicator: we are in kernel mode */
	/* NOTE(review): the value stored to KM is set on an elided line */
	swi	r11, r0, PER_CPU(KM)
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* prepare the link register, the argument and jump */
	la	r15, r0, ret_from_intr - 8
	/* NOTE(review): the do_IRQ dispatch is elided here */
	lwi	r6, r31, TS_THREAD_INFO		/* get thread info */
	lwi	r19, r6, TI_FLAGS		/* get flags in thread info */
	/* do extra work if any bits are set */
	andi	r11, r19, _TIF_NEED_RESCHED
	/* NOTE(review): reschedule branch elided here */
1:	andi	r11, r19, _TIF_SIGPENDING
	beqid	r11, no_intr_reshed
	bralid	r15, do_signal
	/* save mode indicator */
	/* NOTE(review): mode value is loaded from pt_regs on an elided line */
	swi	r11, r0, PER_CPU(KM)
	swi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* special purpose registers */
/*
 * System-call entry (excerpt of _user_exception).
 * r12 holds the syscall number on entry; r14 holds the return PC.
 * Switches to the kernel stack when entered from user mode, saves the
 * clobber-list registers into pt_regs, validates the syscall number and
 * dispatches through sys_call_table with ret_to_user as the return path.
 * NOTE(review): lines are elided in this excerpt — the register save run,
 * the "2:" label, the interrupt re-enable sequence, a second
 * "add r12, r12, r12" (num * 4 -> table offset), and the "1:" invalid-
 * syscall label. Do not assemble as-is.
 */
ENTRY(_user_exception)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
	beqid	r11, 1f				/* already in kernel mode? */
	/* NOTE(review): delay-slot instruction elided here */
	brid	2f				/* jump over */
	addik	r1, r1, (-PT_SIZE)		/* room for pt_regs (delay slot) */
1:	/* switch to kernel stack */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	/* calculate kernel stack pointer */
	addik	r1, r1, THREAD_SIZE - PT_SIZE
	/* NOTE(review): "2:" label elided here */
	swi	r11, r1, PT_MODE		/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	/* save them on stack */
	swi	r3, r1, PT_R3	/* r3: _always_ in clobber list; see unistd.h */
	swi	r4, r1, PT_R4	/* r4: _always_ in clobber list; see unistd.h */
	/* r12: _always_ in clobber list; see unistd.h */
	/* r14: _always_ in clobber list; see unistd.h */
	/* but we want to return to the next inst. */
	/* NOTE(review): the "addik r14, r14, 4" increment is elided here */
	swi	r14, r1, PT_PC	/* increment by 4 and store in pc */
	nop		/* make sure IE bit is in effect */
	clear_bip	/* once IE is in effect it is safe to clear BIP */
	/* special purpose registers */
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	/* update mode indicator: we are in kernel mode */
	swi	r11, r0, PER_CPU(KM)
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* re-enable interrupts now we are in kernel mode */
	/* See if the system call number is valid. */
	addi	r11, r12, -__NR_syscalls
	bgei	r11, 1f		/* return to user if not valid */
	/* NOTE(review): negative r12 is not rejected by this check — confirm
	 * an earlier (possibly elided) test covers it */
	/* Figure out which function to use for this system call. */
	/* Note MicroBlaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12	/* convert num -> ptr (second doubling elided) */
	lwi	r12, r12, sys_call_table	/* get function pointer */
	la	r15, r0, ret_to_user-8		/* set return address */
	bra	r12		/* make the system call */
	bri	0		/* won't reach here */
	/* NOTE(review): "1:" invalid-syscall label elided here */
	brid	ret_to_user	/* jump to syscall epilogue */
	addi	r3, r0, -ENOSYS	/* set errno in delay slot */
/*
 * Debug traps are like a system call, but entered via brki r14, 0x60.
 * All we need to do is send the SIGTRAP signal to current; ptrace and
 * do_signal will handle the rest.
 */
/*
 * Debug trap entry (excerpt of _debug_exception).
 * Switches to the kernel stack, saves the clobber-list registers into
 * pt_regs, then prepares arguments to deliver SIGTRAP to current.
 * Unlike the syscall path, PT_PC keeps r14 unmodified so execution
 * resumes at the interrupted instruction.
 * NOTE(review): lines are elided in this excerpt — the register save run,
 * the interrupt re-enable, the send_sig call itself and the epilogue.
 * Do not assemble as-is.
 */
ENTRY(_debug_exception)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	addik	r1, r1, THREAD_SIZE - PT_SIZE	/* get the kernel stack */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
	swi	r11, r1, PT_MODE		/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	/* save them on stack */
	swi	r3, r1, PT_R3	/* r3: _always_ in clobber list; see unistd.h */
	swi	r4, r1, PT_R4	/* r4: _always_ in clobber list; see unistd.h */
	/* r12: _always_ in clobber list; see unistd.h */
	/* r14: _always_ in clobber list; see unistd.h */
	swi	r14, r1, PT_PC	/* will return to interrupted instruction */
	nop		/* make sure IE bit is in effect */
	clear_bip	/* once IE is in effect it is safe to clear BIP */
	/* special purpose registers */
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	/* update mode indicator: we are in kernel mode */
	swi	r11, r0, PER_CPU(KM)
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* re-enable interrupts now we are in kernel mode */
	addi	r5, r0, SIGTRAP	/* 1st arg: the trap signal */
	add	r6, r0, r31	/* 2nd arg: current */
	/* NOTE(review): part of the argument setup / call is elided here */
	add	r7, r0, r0	/* 3rd param zero */
	/* Restore r3/r4 to work around how ret_to_user works */
/* struct task_struct *_switch_to(struct thread_info *prev,
				   struct thread_info *next); */
/*
 * Context switch (excerpt of _switch_to).
 * r5 = prev thread_info, r6 = next thread_info (per the comment above).
 * Saves the outgoing context into prev->cpu_context, updates the per-CPU
 * CURRENT_SAVE pointer, then restores from next->cpu_context.
 * NOTE(review): the ENTRY label and the actual save/restore instruction
 * runs are elided in this excerpt — only the cpu_context base address
 * computations and the CURRENT_SAVE update survive. Do not assemble as-is.
 */
	/* prepare return value */
	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	addik	r11, r5, TI_CPU_CONTEXT		/* r11 = &prev->cpu_context */
	/* skip volatile registers:
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	/* save non-volatile registers */
	/* special purpose registers */
	/* update r31, the current */
	/* NOTE(review): the load of the new r31 value is elided above */
	swi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* get new process' cpu context and restore */
	addik	r11, r6, TI_CPU_CONTEXT		/* r11 = &next->cpu_context */
	/* special purpose registers */
	/* non-volatile registers */
	/* dedicated registers */
	/* skip volatile registers */
/*
 * ret_from_fork tail (excerpt): call schedule_tail for the new child,
 * then stash r31 (current) into the user-context pt_regs so ret_to_user
 * can restore it.
 * NOTE(review): the entry label and the brlid delay-slot instruction are
 * elided in this excerpt.
 */
	brlid	r15, schedule_tail
	/* NOTE(review): delay-slot instruction elided here */
	swi	r31, r1, PT_R31	/* save r31 in user context */
	/* will soon be restored to r31 in ret_to_user */
/*
 * Work-pending check and syscall epilogue (excerpt).
 * r19 = thread_info flags, r31 = current. Handles _TIF_NEED_RESCHED and
 * _TIF_SIGPENDING, stores syscall return values into pt_regs, and saves
 * the per-CPU mode/current state before returning to user space.
 * NOTE(review): the work_pending / no_work_pending / ret_to_user labels
 * and several branch/restore lines are elided in this excerpt.
 * Do not assemble as-is.
 */
	andi	r11, r19, _TIF_NEED_RESCHED
	/* NOTE(review): reschedule branch elided here */
1:	andi	r11, r19, _TIF_SIGPENDING
	beqi	r11, no_work_pending
	bralid	r15, do_signal
	/* NOTE(review): delay slot / argument setup elided here */

	/* syscall epilogue: publish return values, then look for extra work */
	swi	r4, r1, PT_R4	/* return val */
	swi	r3, r1, PT_R3	/* return val */
	lwi	r6, r31, TS_THREAD_INFO	/* get thread info */
	lwi	r19, r6, TI_FLAGS	/* get flags in thread info */
	bnei	r19, work_pending	/* do extra work if any bits are set */

	/* NOTE(review): no_work_pending label elided here */
	swi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* save mode indicator */
	/* NOTE(review): r18 is loaded from PT_MODE on an elided line */
	swi	r18, r0, PER_CPU(KM)
	/* special purpose registers */
	/* NOTE(review): register restore run elided here */
	lwi	r4, r1, PT_R4	/* return val */
	lwi	r3, r1, PT_R3	/* return val */
/*
 * Syscall wrapper stubs (excerpt): tail-branch into the C/asm
 * implementations; the instruction after each brid executes in the
 * branch delay slot.
 * NOTE(review): the entry labels for the vfork/clone/execve wrappers and
 * the delay-slot instructions (typically pt_regs argument setup) are
 * elided in this excerpt. Do not assemble as-is.
 */
	brid	microblaze_vfork
	/* NOTE(review): delay-slot instruction elided */
	brid	microblaze_clone
	/* NOTE(review): delay-slot instruction elided */
	brid	microblaze_execve
	/* NOTE(review): delay-slot instruction elided */

sys_rt_sigreturn_wrapper:
	brid	sys_rt_sigreturn
	/* NOTE(review): delay-slot instruction elided */

sys_rt_sigsuspend_wrapper:
	brid	sys_rt_sigsuspend
	/* NOTE(review): delay-slot instruction elided */
/*
 * Interrupt vector table (excerpt), placed in the .init.ivt section so
 * the boot code can copy it to the hardware vector addresses.
 * NOTE(review): the reset / user-exception / interrupt vector entries and
 * the sys_call_table label/section directive are elided in this excerpt.
 */
	.section .init.ivt, "ax"
	brai	_hw_exception_handler
	brai	_debug_exception

	/* NOTE(review): "ENTRY(sys_call_table)" elided before the include */
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)