microblaze: Remove PER_CPU(KM) variable
arch/microblaze/kernel/entry.S
/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002 NEC Corporation
 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */

#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <asm/page.h>
#include <asm/unistd.h>

#include <linux/errno.h>
#include <asm/signal.h>

#undef DEBUG

/* The size of a state save frame. */
#define STATE_SAVE_SIZE	(PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 - the space for args */

#define C_ENTRY(name)	.globl name; .align 4; name
/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary on MicroBlaze versions that allow
 * msr ops to write to BIP.
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	clear_bip
	msrclr	r11, MSR_BIP
	nop
	.endm

	.macro	set_bip
	msrset	r11, MSR_BIP
	nop
	.endm

	.macro	clear_eip
	msrclr	r11, MSR_EIP
	nop
	.endm

	.macro	set_ee
	msrset	r11, MSR_EE
	nop
	.endm

	.macro	disable_irq
	msrclr	r11, MSR_IE
	nop
	.endm

	.macro	enable_irq
	msrset	r11, MSR_IE
	nop
	.endm

	.macro	set_ums
	msrset	r11, MSR_UMS
	nop
	msrclr	r11, MSR_VMS
	nop
	.endm

	.macro	set_vms
	msrclr	r11, MSR_UMS
	nop
	msrset	r11, MSR_VMS
	nop
	.endm

	.macro	clear_vms_ums
	msrclr	r11, MSR_VMS | MSR_UMS
	nop
	.endm
#else
	.macro	clear_bip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	nop
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ums
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr, r11
	nop
	.endm
#endif

/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save */
#define VM_ON			\
	set_ums;		\
	rted	r0, 2f;		\
	nop;			\
2:

/* turn off virtual protected mode save and user mode save */
#define VM_OFF			\
	clear_vms_ums;		\
	rted	r0, TOPHYS(1f);	\
	nop;			\
1:
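/*
 * Both macros lean on the rted semantics: the return-from-exception
 * instructions (rted/rtid/rtbd) copy MSR[UMS] into MSR[UM] and MSR[VMS]
 * into MSR[VM] as they branch, so the mode change takes effect exactly
 * at the target label rather than mid-sequence.
 */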
#define SAVE_REGS \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
	swi	r3, r1, PTO+PT_R3;					\
	swi	r4, r1, PTO+PT_R4;					\
	swi	r5, r1, PTO+PT_R5;					\
	swi	r6, r1, PTO+PT_R6;					\
	swi	r7, r1, PTO+PT_R7;					\
	swi	r8, r1, PTO+PT_R8;					\
	swi	r9, r1, PTO+PT_R9;					\
	swi	r10, r1, PTO+PT_R10;					\
	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PTO+PT_R12;					\
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PTO+PT_R19;					\
	swi	r20, r1, PTO+PT_R20;					\
	swi	r21, r1, PTO+PT_R21;					\
	swi	r22, r1, PTO+PT_R22;					\
	swi	r23, r1, PTO+PT_R23;					\
	swi	r24, r1, PTO+PT_R24;					\
	swi	r25, r1, PTO+PT_R25;					\
	swi	r26, r1, PTO+PT_R26;					\
	swi	r27, r1, PTO+PT_R27;					\
	swi	r28, r1, PTO+PT_R28;					\
	swi	r29, r1, PTO+PT_R29;					\
	swi	r30, r1, PTO+PT_R30;					\
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	nop;								\
	swi	r11, r1, PTO+PT_MSR;

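/*
 * Note what SAVE_REGS skips: r0 (hardwired zero), r1 (the stack pointer,
 * stored separately as PT_R1 by each entry path), and r16/r17, the
 * dedicated break- and HW-exception return-address registers, which only
 * matter on those entry paths (SAVE_STATE below stores r17 as PT_PC).
 */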
#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR;					\
	mts	rmsr, r11;						\
	nop;								\
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
	lwi	r3, r1, PTO+PT_R3;					\
	lwi	r4, r1, PTO+PT_R4;					\
	lwi	r5, r1, PTO+PT_R5;					\
	lwi	r6, r1, PTO+PT_R6;					\
	lwi	r7, r1, PTO+PT_R7;					\
	lwi	r8, r1, PTO+PT_R8;					\
	lwi	r9, r1, PTO+PT_R9;					\
	lwi	r10, r1, PTO+PT_R10;					\
	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PTO+PT_R12;					\
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PTO+PT_R19;					\
	lwi	r20, r1, PTO+PT_R20;					\
	lwi	r21, r1, PTO+PT_R21;					\
	lwi	r22, r1, PTO+PT_R22;					\
	lwi	r23, r1, PTO+PT_R23;					\
	lwi	r24, r1, PTO+PT_R24;					\
	lwi	r25, r1, PTO+PT_R25;					\
	lwi	r26, r1, PTO+PT_R26;					\
	lwi	r27, r1, PTO+PT_R27;					\
	lwi	r28, r1, PTO+PT_R28;					\
	lwi	r29, r1, PTO+PT_R29;					\
	lwi	r30, r1, PTO+PT_R30;					\
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */

	.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set and interrupts
 * are masked. This is nice - it means we don't have to CLI before the
 * state save.
 */
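/*
 * A minimal user-side sketch of this protocol (illustrative only; the
 * vector table at the end of this file is what routes address 0x8 here):
 *
 *	addik	r12, r0, __NR_write	# syscall number
 *	addik	r5, r0, 1		# first arg; args go in r5-r10
 *	brki	r14, 0x8		# trap; return value arrives in r3
 */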
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 bytes after call */
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */

	mfs	r11, rmsr
	nop
	andi	r11, r11, MSR_UMS
	bnei	r11, 1f

/* Kernel-mode state save - kernel execve */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr */
	tophys(r1,r11);
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	addi	r11, r0, 1;		/* Was in kernel-mode. */
	swi	r11, r1, PTO+PT_MODE;	/* pt_regs -> kernel mode */
	brid	2f;
	nop;				/* Fill delay slot */

/* User-mode state save. */
1:
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
	/* calculate kernel stack pointer from task struct (8k stack) */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	swi	r0, r1, PTO+PT_MODE;	/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;	/* Store user SP. */
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number. */
	swi	r12, r1, PTO+PT_R0;
	tovirt(r1,r1)

/* The trap return address needs -8 to adjust for "rtsd r15, 8" */
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where the called function should
 * return. [note that MAKE_SYS_CALL uses label 1] */

	# Step into virtual mode.
	set_vms;
	addik	r11, r0, 3f
	rtid	r11, 0
	nop
3:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f

	addik	r3, r0, -ENOSYS
	swi	r3, r1, PTO + PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PTO + PT_R0

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	lwi	r5, r1, PTO+PT_R5;
	lwi	r6, r1, PTO+PT_R6;
	lwi	r7, r1, PTO+PT_R7;
	lwi	r8, r1, PTO+PT_R8;
	lwi	r9, r1, PTO+PT_R9;
	lwi	r10, r1, PTO+PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return. [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11, 5f;
	/* Figure out which function to use for this system call. */
	/* Note MicroBlaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;	/* convert num -> ptr */
	add	r12, r12, r12;

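	/* The two adds double r12 twice: r12 = nr * 4, i.e. a byte offset
	 * into the word-sized sys_call_table slots, computed without the
	 * (optional) barrel shifter. */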
#ifdef DEBUG
	/* Trace syscalls and store the counts in r0_ram */
	lwi	r3, r12, 0x400 + r0_ram
	addi	r3, r3, 1
	swi	r3, r12, 0x400 + r0_ram
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* The trap return address needs -8 to adjust for "rtsd r15, 8" */
	la	r15, r0, ret_from_trap-8
	bra	r12
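	/* The -8 above pairs with the "rtsd r15, 8" the C-level handler
	 * returns with: the callee comes back to r15 + 8, which is exactly
	 * ret_from_trap. */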

	/* The syscall number is invalid, return an error. */
5:
	addi	r3, r0, -ENOSYS;
	rtsd	r15, 8;		/* looks like a normal subroutine return */
	or	r0, r0, r0


/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
	set_bip;			/* Ints masked for state restore */
	swi	r3, r1, PTO + PT_R3
	swi	r4, r1, PTO + PT_R4

	lwi	r11, r1, PTO+PT_MODE;
	/* See if returning to kernel mode, if so, skip resched &c. */
	bnei	r11, 2f;
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PTO + PT_R0
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals pending? Skip them */

	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

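	/* in_syscall=1 flags this as a syscall frame (the exception paths
	 * below pass 0); the saved r3 is then a syscall return value that
	 * do_signal can subject to -ERESTARTSYS-style restart handling. */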
/* Finally, return to user state. */
1:
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;	/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;


/* These syscalls need access to the struct pt_regs on the stack, so we
   implement them in assembly (they're basically all wrappers anyway). */

C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD		/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	la	r7, r1, PTO		/* Arg 2: parent context */
	add	r8, r0, r0		/* Arg 3: (unused) */
	add	r9, r0, r0;		/* Arg 4: (unused) */
	add	r10, r0, r0;		/* Arg 5: (unused) */
	brid	do_fork			/* Do real work (tail-call) */
	nop;

/* This is the initial entry point for a new child thread, with an
   appropriate stack in place that makes it look like the child is in the
   middle of a syscall. This function is actually `returned to' from
   switch_thread (copy_thread makes ret_from_fork the return address in
   each new thread's saved context). */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail;	/* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
	brid	ret_from_trap;	/* Do normal trap return */
	nop;

C_ENTRY(sys_vfork):
	brid	microblaze_vfork	/* Do real work (tail-call) */
	la	r5, r1, PTO

C_ENTRY(sys_clone):
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PTO + PT_R1;	/* If so, use parent's stack ptr */
1:	add	r10, r0, r9;		/* Arg 6: (child_tidptr) */
	add	r9, r0, r8;		/* Arg 5: (parent_tidptr) */
	add	r8, r0, r7;		/* Arg 4: (stack_size) */
	la	r7, r1, PTO;		/* Arg 3: pt_regs */
	brid	do_fork			/* Do real work (tail-call) */
	nop

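/* The shuffle above matches do_fork's C prototype of this era:
 * do_fork(flags, stack_start, regs, stack_size, parent_tidptr,
 * child_tidptr). Wedging pt_regs in as the third argument pushes the
 * remaining user-supplied args one register up. */
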
C_ENTRY(sys_execve):
	la	r8, r1, PTO;		/* add user context as 4th arg */
	brid	microblaze_execve;	/* Do real work (tail-call). */
	nop;

C_ENTRY(sys_rt_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3;	/* save r3, r4 registers */
	swi	r4, r1, PTO+PT_R4;
	la	r5, r1, PTO;		/* add user context as 1st arg */
	brlid	r15, sys_rt_sigreturn	/* Do real work */
	nop;
	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri	ret_from_trap	/* fall through will not work here due to align */
	nop;

/*
 * HW EXCEPTION routine start
 */

#define SAVE_STATE \
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */	\
	set_bip;	/* equalize initial state for all possible entries */\
	clear_eip;							\
	enable_irq;							\
	set_ee;								\
	/* See if already in kernel mode. */				\
	mfs	r11, rmsr;						\
	nop;								\
	andi	r11, r11, MSR_UMS;					\
	bnei	r11, 1f;						\
	/* Kernel-mode state save. */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	tophys(r1,r11);							\
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */	\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above */	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	addi	r11, r0, 1;		/* Was in kernel-mode. */	\
	swi	r11, r1, PTO+PT_MODE;					\
	brid	2f;							\
	nop;				/* Fill delay slot */		\
1:	/* User-mode state save. */					\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */\
	tophys(r1,r1);							\
									\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above FIXME */	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	swi	r0, r1, PTO+PT_MODE;	/* Was in user-mode. */		\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PTO+PT_R1;	/* Store user SP. */		\
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));	\
	/* Save away the syscall number. */				\
	swi	r0, r1, PTO+PT_R0;					\
	tovirt(r1,r1)

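/*
 * The key difference from the syscall path: on a HW exception the
 * faulting PC arrives in r17 (the dedicated exception return register),
 * not r14, so SAVE_STATE overwrites the PT_PC slot with r17 right after
 * SAVE_REGS has stored r14 there.
 */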
C_ENTRY(full_exception_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	/* adjust the exception address for the privileged instruction
	 * so we can find where it was */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* FIXME: this could be stored directly in the PT_ESR reg,
	 * but doing so faulted when tested */
	/* The trap return address needs -8 to adjust for "rtsd r15, 8" */
	la	r15, r0, ret_from_exc - 8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, resr
	nop
	mfs	r7, rfsr;		/* save FSR */
	nop
	mts	rfsr, r0;		/* Clear sticky fsr */
	nop
	la	r12, r0, full_exception
	set_vms;
	rtbd	r12, 0;
	nop;

/*
 * Unaligned data trap.
 *
 * An unaligned data trap on the last 4k page is handled here.
 *
 * Trap entered via exception, so EE bit is set and interrupts
 * are masked. This is nice - it means we don't have to CLI before the
 * state save.
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers. */
	/* The trap return address needs -8 to adjust for "rtsd r15, 8" */
	la	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	nop
	mfs	r4, rear		/* EAR */
	nop
	la	r7, r1, PTO		/* parameter struct pt_regs * regs */
	la	r12, r0, _unaligned_data_exception
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;


/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such a
 * situation.
 *
 * Trap entered via exceptions, so EE bit is set and interrupts
 * are masked. This is nice - it means we don't have to CLI before the
 * state save.
 *
 * Build a standard exception frame for TLB Access errors. All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 *	void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction trap - which one occurred is resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers. */
	/* The trap return address needs -8 to adjust for "rtsd r15, 8" */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	mfs	r7, resr		/* parameter unsigned long error_code */
	nop
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;

C_ENTRY(page_fault_instr_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers. */
	/* The trap return address needs -8 to adjust for "rtsd r15, 8" */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	ori	r7, r0, 0		/* parameter unsigned long error_code */
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;


/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
	set_bip;		/* Ints masked for state restore */
	lwi	r11, r1, PTO+PT_MODE;
	bnei	r11, 2f;	/* See if returning to kernel mode, */
				/* ... if so, skip resched &c. */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals pending? Skip them */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state. Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns. */
	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1:
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */

	lwi	r1, r1, PT_R1 - PT_SIZE;	/* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state. */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;

/*
 * HW EXCEPTION routine end
 */


/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual. */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	swi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* MS: See if already in kernel mode. */
	mfs	r11, rmsr
	nop
	andi	r11, r11, MSR_UMS
	bnei	r11, 1f

/* Kernel-mode state save. */
	or	r11, r1, r0
	tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
	/* MS: Save original SP into the PT_R1 slot of the frame built below */
	swi	r11, r1, (PT_R1 - PT_SIZE);
	/* MS: restore r11 because of saving in SAVE_REGS */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	/* MS: store mode */
	addi	r11, r0, 1;		/* MS: Was in kernel-mode. */
	swi	r11, r1, PTO + PT_MODE;	/* MS: and save it */
	brid	2f;
	nop;				/* MS: Fill delay slot */

1:
/* User-mode state save. */
	/* MS: restore r11 -> FIXME move before SAVE_REG */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	/* MS: store mode */
	swi	r0, r1, PTO + PT_MODE;	/* MS: was in user-mode */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;
2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	swi	r0, r1, PTO + PT_R0;
	tovirt(r1,r1)
	la	r5, r1, PTO;
	set_vms;
	la	r11, r0, do_IRQ;
	la	r15, r0, irq_call;
irq_call:rtbd	r11, 0;
	nop;
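	/* Pointing r15 at irq_call is the same -8 trick as the syscall
	 * path: do_IRQ returns with "rtsd r15, 8", landing exactly on
	 * ret_from_irq below (irq_call + 8). */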

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;

	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* MS: get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0;	/* Arg 3: int in_syscall */
	la	r5, r1, PTO;	/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;	/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;

	lwi	r5, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore		/* if zero jump over */

preempt:
	/* interrupts are off, which is why we call preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r5, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	bnei	r5, preempt		/* if non zero jump to resched */
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return:	/* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop

/*
 * `Debug' trap
 * We enter dbtrap in "BIP" (breakpoint) mode.
 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
 * original dbtrap.
 * However, we wait to save state first.
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
	set_bip;	/* equalize initial state for all possible entries */
	clear_eip;
	enable_irq;
	mfs	r11, rmsr
	nop
	andi	r11, r11, MSR_UMS
	bnei	r11, 1f
/* Kernel-mode state save. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr */
	tophys(r1,r11);
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS;

	addi	r11, r0, 1;		/* Was in kernel-mode. */
	swi	r11, r1, PTO + PT_MODE;
	brid	2f;
	nop;				/* Fill delay slot */
1:	/* User-mode state save. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS;

	swi	r0, r1, PTO+PT_MODE;	/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;	/* Store user SP. */
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number. */
	swi	r0, r1, PTO+PT_R0;
	tovirt(r1,r1)

	addi	r5, r0, SIGTRAP		/* Arg 1: send the trap signal */
	add	r6, r0, CURRENT_TASK;	/* Arg 2: current task ptr */
	addk	r7, r0, r0		/* Arg 3: zero */

	set_vms;
	la	r11, r0, send_sig;
	la	r15, r0, dbtrap_call;
dbtrap_call:	rtbd	r11, 0;
	nop;
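	/* Same r15 trick again: send_sig's "rtsd r15, 8" returns to
	 * dbtrap_call + 8, i.e. the restore sequence just below. */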

	set_bip;			/* Ints masked for state restore */
	lwi	r11, r1, PTO+PT_MODE;
	bnei	r11, 2f;

	/* See if we need to reschedule */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */

	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */
	/* XXX Is PT_DTRACE handling needed here? */
	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals pending? Skip them */

	/* Handle a signal return; Pending signals should be in r18. */
	/* Not all registers are saved by the normal trap/interrupt entry
	   points (for instance, call-saved registers (because the normal
	   C-compiler calling sequence in the kernel makes sure they're
	   preserved), and call-clobbered registers in the case of
	   traps), but signal handlers may want to examine or change the
	   complete register state. Here we save anything not saved by
	   the normal entry sequence, so that it may be safely restored
	   (in a possibly modified form) after do_signal returns. */

	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */


/* Finally, return to user state. */
1:
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */

	lwi	r1, r1, PT_R1 - PT_SIZE;	/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */

	tovirt(r1,r1);
6:
DBTRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;


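/*
 * _switch_to calling convention, as this code assumes it: r5 holds the
 * previous task's thread_info, r6 the next task's thread_info, and the
 * previous task pointer is handed back in r3.
 */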
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp registers */
	/* r11 = start of the previous task's cpu_context */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on the stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	nop
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	nop
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	nop
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	nop
	swi	r12, r11, CC_FSR

	/* update r31 (CURRENT_TASK) to the task that runs next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* store it in CURRENT_SAVE too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get the new process' cpu context and restore it */
	/* r11 = start of the next task's cpu_context */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	nop
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	nop

	rtsd	r15, 8
	nop

ENTRY(_reset)
	brai	0x70; /* Jump back to FS-boot */

ENTRY(_break)
	mfs	r5, rmsr
	nop
	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
	mfs	r5, resr
	nop
	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
	bri	0

/* These are compiled and loaded into high memory, then
 * copied into place in mach_early_setup */
	.section	.init.ivt, "ax"
	.org	0x0
	/* this is very important - here is the reset vector */
	/* in the current MMU branch you don't care what is here - it is
	 * used from the bootloader side - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception);	/* syscall handler */
	brai	TOPHYS(_interrupt);		/* Interrupt handler */
	brai	TOPHYS(_break);			/* nmi trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */

	.org	0x60
	brai	TOPHYS(_debug_exception);	/* debug trap handler */

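/*
 * Laid out this way (and assuming the assembler emits imm+brai pairs for
 * the far TOPHYS targets), the entries land on the architected MicroBlaze
 * vector slots: 0x00 reset, 0x08 user exception (syscall), 0x10 interrupt,
 * 0x18 break/NMI, 0x20 HW exception and 0x60 debug.
 */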
	.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"

/*
 * Trap decoding for stack unwinder
 * Tuples are (start addr, end addr, string)
 * If the return address lies in [start addr, end addr],
 * the unwinder displays 'string'
 */

	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_from_trap; .word ret_from_trap  ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq   ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_irq ; .word no_intr_resched; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return    ; .word type_SYSCALL_PREEMPT
	/* End of table */
	.word 0            ; .word 0              ; .word 0