Commit | Line | Data |
---|---|---|
ca54502b MS |
1 | /* |
2 | * Low-level system-call handling, trap handlers and context-switching | |
3 | * | |
4 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | |
5 | * Copyright (C) 2008-2009 PetaLogix | |
6 | * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au> | |
7 | * Copyright (C) 2001,2002 NEC Corporation | |
8 | * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org> | |
9 | * | |
10 | * This file is subject to the terms and conditions of the GNU General | |
11 | * Public License. See the file COPYING in the main directory of this | |
12 | * archive for more details. | |
13 | * | |
14 | * Written by Miles Bader <miles@gnu.org> | |
15 | * Heavily modified by John Williams for Microblaze | |
16 | */ | |
17 | ||
18 | #include <linux/sys.h> | |
19 | #include <linux/linkage.h> | |
20 | ||
21 | #include <asm/entry.h> | |
22 | #include <asm/current.h> | |
23 | #include <asm/processor.h> | |
24 | #include <asm/exceptions.h> | |
25 | #include <asm/asm-offsets.h> | |
26 | #include <asm/thread_info.h> | |
27 | ||
28 | #include <asm/page.h> | |
29 | #include <asm/unistd.h> | |
30 | ||
31 | #include <linux/errno.h> | |
32 | #include <asm/signal.h> | |
33 | ||
11d51360 MS |
34 | #undef DEBUG |
35 | ||
ca54502b MS |
36 | /* The size of a state save frame. */ |
37 | #define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE) | |
38 | ||
39 | /* The offset of the struct pt_regs in a `state save frame' on the stack. */ | |
40 | #define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */ | |
41 | ||
42 | #define C_ENTRY(name) .globl name; .align 4; name | |
43 | ||
44 | /* | |
45 | * Various ways of setting and clearing BIP in flags reg. | |
46 | * This is mucky, but necessary using microblaze version that | |
47 | * allows msr ops to write to BIP | |
48 | */ | |
49 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR | |
50 | .macro clear_bip | |
66f7de86 | 51 | msrclr r0, MSR_BIP |
ca54502b MS |
52 | nop |
53 | .endm | |
54 | ||
55 | .macro set_bip | |
66f7de86 | 56 | msrset r0, MSR_BIP |
ca54502b MS |
57 | nop |
58 | .endm | |
59 | ||
60 | .macro clear_eip | |
66f7de86 | 61 | msrclr r0, MSR_EIP |
ca54502b MS |
62 | nop |
63 | .endm | |
64 | ||
65 | .macro set_ee | |
66f7de86 | 66 | msrset r0, MSR_EE |
ca54502b MS |
67 | nop |
68 | .endm | |
69 | ||
70 | .macro disable_irq | |
66f7de86 | 71 | msrclr r0, MSR_IE |
ca54502b MS |
72 | nop |
73 | .endm | |
74 | ||
75 | .macro enable_irq | |
66f7de86 | 76 | msrset r0, MSR_IE |
ca54502b MS |
77 | nop |
78 | .endm | |
79 | ||
80 | .macro set_ums | |
66f7de86 | 81 | msrset r0, MSR_UMS |
ca54502b | 82 | nop |
66f7de86 | 83 | msrclr r0, MSR_VMS |
ca54502b MS |
84 | nop |
85 | .endm | |
86 | ||
87 | .macro set_vms | |
66f7de86 | 88 | msrclr r0, MSR_UMS |
ca54502b | 89 | nop |
66f7de86 | 90 | msrset r0, MSR_VMS |
ca54502b MS |
91 | nop |
92 | .endm | |
93 | ||
b318067e | 94 | .macro clear_ums |
66f7de86 | 95 | msrclr r0, MSR_UMS |
b318067e MS |
96 | nop |
97 | .endm | |
98 | ||
ca54502b | 99 | .macro clear_vms_ums |
66f7de86 | 100 | msrclr r0, MSR_VMS | MSR_UMS |
ca54502b MS |
101 | nop |
102 | .endm | |
103 | #else | |
104 | .macro clear_bip | |
105 | mfs r11, rmsr | |
106 | nop | |
107 | andi r11, r11, ~MSR_BIP | |
108 | mts rmsr, r11 | |
109 | nop | |
110 | .endm | |
111 | ||
112 | .macro set_bip | |
113 | mfs r11, rmsr | |
114 | nop | |
115 | ori r11, r11, MSR_BIP | |
116 | mts rmsr, r11 | |
117 | nop | |
118 | .endm | |
119 | ||
120 | .macro clear_eip | |
121 | mfs r11, rmsr | |
122 | nop | |
123 | andi r11, r11, ~MSR_EIP | |
124 | mts rmsr, r11 | |
125 | nop | |
126 | .endm | |
127 | ||
128 | .macro set_ee | |
129 | mfs r11, rmsr | |
130 | nop | |
131 | ori r11, r11, MSR_EE | |
132 | mts rmsr, r11 | |
133 | nop | |
134 | .endm | |
135 | ||
136 | .macro disable_irq | |
137 | mfs r11, rmsr | |
138 | nop | |
139 | andi r11, r11, ~MSR_IE | |
140 | mts rmsr, r11 | |
141 | nop | |
142 | .endm | |
143 | ||
144 | .macro enable_irq | |
145 | mfs r11, rmsr | |
146 | nop | |
147 | ori r11, r11, MSR_IE | |
148 | mts rmsr, r11 | |
149 | nop | |
150 | .endm | |
151 | ||
152 | .macro set_ums | |
153 | mfs r11, rmsr | |
154 | nop | |
155 | ori r11, r11, MSR_VMS | |
156 | andni r11, r11, MSR_UMS | |
157 | mts rmsr, r11 | |
158 | nop | |
159 | .endm | |
160 | ||
161 | .macro set_vms | |
162 | mfs r11, rmsr | |
163 | nop | |
164 | ori r11, r11, MSR_VMS | |
165 | andni r11, r11, MSR_UMS | |
166 | mts rmsr, r11 | |
167 | nop | |
168 | .endm | |
169 | ||
b318067e MS |
170 | .macro clear_ums |
171 | mfs r11, rmsr | |
172 | nop | |
173 | andni r11, r11, MSR_UMS | |
174 | mts rmsr,r11 | |
175 | nop | |
176 | .endm | |
177 | ||
ca54502b MS |
178 | .macro clear_vms_ums |
179 | mfs r11, rmsr | |
180 | nop | |
181 | andni r11, r11, (MSR_VMS|MSR_UMS) | |
182 | mts rmsr,r11 | |
183 | nop | |
184 | .endm | |
185 | #endif | |
186 | ||
187 | /* Define how to call high-level functions. With MMU, virtual mode must be | |
188 | * enabled when calling the high-level function. Clobbers R11. | |
189 | * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL | |
190 | */ | |
191 | ||
192 | /* turn on virtual protected mode save */ | |
193 | #define VM_ON \ | |
a4a94dbf | 194 | set_ums; \ |
ca54502b | 195 | rted r0, 2f; \ |
a4a94dbf MS |
196 | nop; \ |
197 | 2: | |
ca54502b MS |
198 | |
199 | /* turn off virtual protected mode save and user mode save*/ | |
200 | #define VM_OFF \ | |
a4a94dbf | 201 | clear_vms_ums; \ |
ca54502b | 202 | rted r0, TOPHYS(1f); \ |
a4a94dbf MS |
203 | nop; \ |
204 | 1: | |
ca54502b MS |
205 | |
206 | #define SAVE_REGS \ | |
207 | swi r2, r1, PTO+PT_R2; /* Save SDA */ \ | |
36f60954 MS |
208 | swi r3, r1, PTO+PT_R3; \ |
209 | swi r4, r1, PTO+PT_R4; \ | |
ca54502b MS |
210 | swi r5, r1, PTO+PT_R5; \ |
211 | swi r6, r1, PTO+PT_R6; \ | |
212 | swi r7, r1, PTO+PT_R7; \ | |
213 | swi r8, r1, PTO+PT_R8; \ | |
214 | swi r9, r1, PTO+PT_R9; \ | |
215 | swi r10, r1, PTO+PT_R10; \ | |
216 | swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\ | |
217 | swi r12, r1, PTO+PT_R12; \ | |
218 | swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \ | |
219 | swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \ | |
220 | swi r15, r1, PTO+PT_R15; /* Save LP */ \ | |
221 | swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \ | |
222 | swi r19, r1, PTO+PT_R19; \ | |
223 | swi r20, r1, PTO+PT_R20; \ | |
224 | swi r21, r1, PTO+PT_R21; \ | |
225 | swi r22, r1, PTO+PT_R22; \ | |
226 | swi r23, r1, PTO+PT_R23; \ | |
227 | swi r24, r1, PTO+PT_R24; \ | |
228 | swi r25, r1, PTO+PT_R25; \ | |
229 | swi r26, r1, PTO+PT_R26; \ | |
230 | swi r27, r1, PTO+PT_R27; \ | |
231 | swi r28, r1, PTO+PT_R28; \ | |
232 | swi r29, r1, PTO+PT_R29; \ | |
233 | swi r30, r1, PTO+PT_R30; \ | |
234 | swi r31, r1, PTO+PT_R31; /* Save current task reg */ \ | |
235 | mfs r11, rmsr; /* save MSR */ \ | |
236 | nop; \ | |
237 | swi r11, r1, PTO+PT_MSR; | |
238 | ||
239 | #define RESTORE_REGS \ | |
240 | lwi r11, r1, PTO+PT_MSR; \ | |
241 | mts rmsr , r11; \ | |
242 | nop; \ | |
243 | lwi r2, r1, PTO+PT_R2; /* restore SDA */ \ | |
36f60954 MS |
244 | lwi r3, r1, PTO+PT_R3; \ |
245 | lwi r4, r1, PTO+PT_R4; \ | |
ca54502b MS |
246 | lwi r5, r1, PTO+PT_R5; \ |
247 | lwi r6, r1, PTO+PT_R6; \ | |
248 | lwi r7, r1, PTO+PT_R7; \ | |
249 | lwi r8, r1, PTO+PT_R8; \ | |
250 | lwi r9, r1, PTO+PT_R9; \ | |
251 | lwi r10, r1, PTO+PT_R10; \ | |
252 | lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\ | |
253 | lwi r12, r1, PTO+PT_R12; \ | |
254 | lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \ | |
255 | lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\ | |
256 | lwi r15, r1, PTO+PT_R15; /* restore LP */ \ | |
257 | lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \ | |
258 | lwi r19, r1, PTO+PT_R19; \ | |
259 | lwi r20, r1, PTO+PT_R20; \ | |
260 | lwi r21, r1, PTO+PT_R21; \ | |
261 | lwi r22, r1, PTO+PT_R22; \ | |
262 | lwi r23, r1, PTO+PT_R23; \ | |
263 | lwi r24, r1, PTO+PT_R24; \ | |
264 | lwi r25, r1, PTO+PT_R25; \ | |
265 | lwi r26, r1, PTO+PT_R26; \ | |
266 | lwi r27, r1, PTO+PT_R27; \ | |
267 | lwi r28, r1, PTO+PT_R28; \ | |
268 | lwi r29, r1, PTO+PT_R29; \ | |
269 | lwi r30, r1, PTO+PT_R30; \ | |
270 | lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */ | |
271 | ||
e5d2af2b MS |
272 | #define SAVE_STATE \ |
273 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \ | |
274 | /* See if already in kernel mode.*/ \ | |
275 | mfs r1, rmsr; \ | |
276 | nop; \ | |
277 | andi r1, r1, MSR_UMS; \ | |
278 | bnei r1, 1f; \ | |
279 | /* Kernel-mode state save. */ \ | |
280 | /* Reload kernel stack-ptr. */ \ | |
281 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ | |
287503fa MS |
282 | /* FIXME: I can add these two lines to one */ \ |
283 | /* tophys(r1,r1); */ \ | |
284 | /* addik r1, r1, -STATE_SAVE_SIZE; */ \ | |
285 | addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \ | |
e5d2af2b | 286 | SAVE_REGS \ |
e5d2af2b | 287 | brid 2f; \ |
da233552 | 288 | swi r1, r1, PTO+PT_MODE; \ |
e5d2af2b MS |
289 | 1: /* User-mode state save. */ \ |
290 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ | |
291 | tophys(r1,r1); \ | |
292 | lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \ | |
287503fa MS |
293 | /* MS these three instructions can be added to one */ \ |
294 | /* addik r1, r1, THREAD_SIZE; */ \ | |
295 | /* tophys(r1,r1); */ \ | |
296 | /* addik r1, r1, -STATE_SAVE_SIZE; */ \ | |
297 | addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \ | |
e5d2af2b | 298 | SAVE_REGS \ |
e5d2af2b MS |
299 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ |
300 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ \ | |
e7741075 | 301 | swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ \ |
e5d2af2b MS |
302 | /* MS: I am clearing UMS even in case when I come from kernel space */ \ |
303 | clear_ums; \ | |
304 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); | |
305 | ||
ca54502b MS |
306 | .text |
307 | ||
308 | /* | |
309 | * User trap. | |
310 | * | |
311 | * System calls are handled here. | |
312 | * | |
313 | * Syscall protocol: | |
314 | * Syscall number in r12, args in r5-r10 | |
315 | * Return value in r3 | |
316 | * | |
317 | * Trap entered via brki instruction, so BIP bit is set, and interrupts | |
318 | * are masked. This is nice, means we don't have to CLI before state save | |
319 | */ | |
320 | C_ENTRY(_user_exception): | |
321 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */ | |
322 | addi r14, r14, 4 /* return address is 4 byte after call */ | |
ca54502b | 323 | |
653e447e | 324 | mfs r1, rmsr |
5c0d72b1 | 325 | nop |
653e447e MS |
326 | andi r1, r1, MSR_UMS |
327 | bnei r1, 1f | |
5c0d72b1 MS |
328 | |
329 | /* Kernel-mode state save - kernel execve */ | |
653e447e MS |
330 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/ |
331 | tophys(r1,r1); | |
ca54502b MS |
332 | |
333 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ | |
334 | SAVE_REGS | |
335 | ||
77f6d226 | 336 | swi r1, r1, PTO + PT_MODE; /* pt_regs -> kernel mode */ |
ca54502b MS |
337 | brid 2f; |
338 | nop; /* Fill delay slot */ | |
339 | ||
340 | /* User-mode state save. */ | |
341 | 1: | |
ca54502b MS |
342 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ |
343 | tophys(r1,r1); | |
344 | lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */ | |
345 | /* calculate kernel stack pointer from task struct 8k */ | |
346 | addik r1, r1, THREAD_SIZE; | |
347 | tophys(r1,r1); | |
348 | ||
349 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ | |
350 | SAVE_REGS | |
351 | ||
77f6d226 | 352 | swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ |
ca54502b MS |
353 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
354 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ | |
25f6e596 | 355 | clear_ums; |
b1d70c62 | 356 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
ca54502b MS |
357 | /* Save away the syscall number. */ |
358 | swi r12, r1, PTO+PT_R0; | |
359 | tovirt(r1,r1) | |
360 | ||
ca54502b MS |
361 | /* where the trap should return need -8 to adjust for rtsd r15, 8*/ |
362 | /* Jump to the appropriate function for the system call number in r12 | |
363 | * (r12 is not preserved), or return an error if r12 is not valid. The LP | |
364 | * register should point to the location where | |
365 | * the called function should return. [note that MAKE_SYS_CALL uses label 1] */ | |
23575483 | 366 | |
25f6e596 MS |
367 | /* Step into virtual mode */ |
368 | rtbd r0, 3f | |
23575483 MS |
369 | nop |
370 | 3: | |
b1d70c62 | 371 | lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */ |
23575483 MS |
372 | lwi r11, r11, TI_FLAGS /* get flags in thread info */ |
373 | andi r11, r11, _TIF_WORK_SYSCALL_MASK | |
374 | beqi r11, 4f | |
375 | ||
376 | addik r3, r0, -ENOSYS | |
377 | swi r3, r1, PTO + PT_R3 | |
378 | brlid r15, do_syscall_trace_enter | |
379 | addik r5, r1, PTO + PT_R0 | |
380 | ||
381 | # do_syscall_trace_enter returns the new syscall nr. | |
382 | addk r12, r0, r3 | |
383 | lwi r5, r1, PTO+PT_R5; | |
384 | lwi r6, r1, PTO+PT_R6; | |
385 | lwi r7, r1, PTO+PT_R7; | |
386 | lwi r8, r1, PTO+PT_R8; | |
387 | lwi r9, r1, PTO+PT_R9; | |
388 | lwi r10, r1, PTO+PT_R10; | |
389 | 4: | |
390 | /* Jump to the appropriate function for the system call number in r12 | |
391 | * (r12 is not preserved), or return an error if r12 is not valid. | |
392 | * The LP register should point to the location where the called function | |
393 | * should return. [note that MAKE_SYS_CALL uses label 1] */ | |
394 | /* See if the system call number is valid */ | |
ca54502b | 395 | addi r11, r12, -__NR_syscalls; |
23575483 | 396 | bgei r11,5f; |
ca54502b MS |
397 | /* Figure out which function to use for this system call. */ |
398 | /* Note Microblaze barrel shift is optional, so don't rely on it */ | |
399 | add r12, r12, r12; /* convert num -> ptr */ | |
400 | add r12, r12, r12; | |
401 | ||
11d51360 | 402 | #ifdef DEBUG |
ca54502b | 403 | /* Trace syscalls and store them to r0_ram */ |
23575483 | 404 | lwi r3, r12, 0x400 + r0_ram |
ca54502b | 405 | addi r3, r3, 1 |
23575483 | 406 | swi r3, r12, 0x400 + r0_ram |
11d51360 | 407 | #endif |
23575483 MS |
408 | |
409 | # Find and jump into the syscall handler. | |
410 | lwi r12, r12, sys_call_table | |
411 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ | |
b9ea77e2 | 412 | addi r15, r0, ret_from_trap-8 |
23575483 | 413 | bra r12 |
ca54502b | 414 | |
ca54502b | 415 | /* The syscall number is invalid, return an error. */ |
23575483 | 416 | 5: |
9814cc11 | 417 | rtsd r15, 8; /* looks like a normal subroutine return */ |
ca54502b | 418 | addi r3, r0, -ENOSYS; |
ca54502b | 419 | |
23575483 | 420 | /* Entry point used to return from a syscall/trap */ |
ca54502b MS |
421 | /* We re-enable BIP bit before state restore */ |
422 | C_ENTRY(ret_from_trap): | |
b1d70c62 MS |
423 | swi r3, r1, PTO + PT_R3 |
424 | swi r4, r1, PTO + PT_R4 | |
425 | ||
77f6d226 | 426 | lwi r11, r1, PTO + PT_MODE; |
36f60954 MS |
427 | /* See if returning to kernel mode, if so, skip resched &c. */ |
428 | bnei r11, 2f; | |
23575483 MS |
429 | /* We're returning to user mode, so check for various conditions that |
430 | * trigger rescheduling. */ | |
b1d70c62 MS |
431 | /* FIXME: Restructure all these flag checks. */ |
432 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ | |
23575483 MS |
433 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
434 | andi r11, r11, _TIF_WORK_SYSCALL_MASK | |
435 | beqi r11, 1f | |
436 | ||
23575483 MS |
437 | brlid r15, do_syscall_trace_leave |
438 | addik r5, r1, PTO + PT_R0 | |
23575483 | 439 | 1: |
ca54502b MS |
440 | /* We're returning to user mode, so check for various conditions that |
441 | * trigger rescheduling. */ | |
b1d70c62 MS |
442 | /* get thread info from current task */ |
443 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; | |
ca54502b MS |
444 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
445 | andi r11, r11, _TIF_NEED_RESCHED; | |
446 | beqi r11, 5f; | |
447 | ||
ca54502b MS |
448 | bralid r15, schedule; /* Call scheduler */ |
449 | nop; /* delay slot */ | |
ca54502b MS |
450 | |
451 | /* Maybe handle a signal */ | |
b1d70c62 MS |
452 | 5: /* get thread info from current task*/ |
453 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; | |
ca54502b MS |
454 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
455 | andi r11, r11, _TIF_SIGPENDING; | |
456 | beqi r11, 1f; /* Signals to handle, handle them */ | |
457 | ||
b9ea77e2 | 458 | addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ |
ca54502b MS |
459 | addi r7, r0, 1; /* Arg 3: int in_syscall */ |
460 | bralid r15, do_signal; /* Handle any signals */ | |
841d6e8c | 461 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ |
b1d70c62 MS |
462 | |
463 | /* Finally, return to user state. */ | |
96014cc3 | 464 | 1: set_bip; /* Ints masked for state restore */ |
8633bebc | 465 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ |
ca54502b MS |
466 | VM_OFF; |
467 | tophys(r1,r1); | |
468 | RESTORE_REGS; | |
469 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | |
470 | lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */ | |
471 | bri 6f; | |
472 | ||
473 | /* Return to kernel state. */ | |
96014cc3 MS |
474 | 2: set_bip; /* Ints masked for state restore */ |
475 | VM_OFF; | |
ca54502b MS |
476 | tophys(r1,r1); |
477 | RESTORE_REGS; | |
478 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | |
479 | tovirt(r1,r1); | |
480 | 6: | |
481 | TRAP_return: /* Make global symbol for debugging */ | |
482 | rtbd r14, 0; /* Instructions to return from an IRQ */ | |
483 | nop; | |
484 | ||
485 | ||
486 | /* These syscalls need access to the struct pt_regs on the stack, so we | |
487 | implement them in assembly (they're basically all wrappers anyway). */ | |
488 | ||
489 | C_ENTRY(sys_fork_wrapper): |
490 | addi r5, r0, SIGCHLD /* Arg 0: flags */ |
491 | lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */ |
b9ea77e2 | 492 | addik r7, r1, PTO /* Arg 2: parent context */ |
ca54502b MS |
493 | add r8, r0, r0 /* Arg 3: (unused) */ |
494 | add r9, r0, r0; /* Arg 4: (unused) */ |
ca54502b | 495 | brid do_fork /* Do real work (tail-call) */ |
9814cc11 | 496 | add r10, r0, r0; /* Arg 5: (unused) */ |
ca54502b MS |
497 | |
498 | /* This is the initial entry point for a new child thread, with an appropriate |
499 | stack in place that makes it look like the child is in the middle of a |
500 | syscall. This function is actually `returned to' from switch_thread | |
501 | (copy_thread makes ret_from_fork the return address in each new thread's | |
502 | saved context). */ | |
503 | C_ENTRY(ret_from_fork): | |
504 | bralid r15, schedule_tail; /* ...which is schedule_tail's arg */ | |
505 | add r3, r5, r0; /* switch_thread returns the prev task */ | |
506 | /* ( in the delay slot ) */ | |
ca54502b | 507 | brid ret_from_trap; /* Do normal trap return */ |
9814cc11 | 508 | add r3, r0, r0; /* Child's fork call should return 0. */ |
ca54502b | 509 | |
e513588f AB |
510 | C_ENTRY(sys_vfork): |
511 | brid microblaze_vfork /* Do real work (tail-call) */ | |
b9ea77e2 | 512 | addik r5, r1, PTO |
ca54502b | 513 | |
e513588f | 514 | C_ENTRY(sys_clone): |
ca54502b | 515 | bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */ |
570e3e23 | 516 | lwi r6, r1, PTO + PT_R1; /* If so, use parent's stack ptr */ |
b9ea77e2 MS |
517 | 1: addik r7, r1, PTO; /* Arg 2: parent context */ |
518 | add r8, r0, r0; /* Arg 3: (unused) */ | |
519 | add r9, r0, r0; /* Arg 4: (unused) */ | |
b9ea77e2 | 520 | brid do_fork /* Do real work (tail-call) */ |
9814cc11 | 521 | add r10, r0, r0; /* Arg 5: (unused) */ |
ca54502b | 522 | |
e513588f | 523 | C_ENTRY(sys_execve): |
e513588f | 524 | brid microblaze_execve; /* Do real work (tail-call).*/ |
9814cc11 | 525 | addik r8, r1, PTO; /* add user context as 4th arg */ |
ca54502b | 526 | |
ca54502b MS |
527 | C_ENTRY(sys_rt_sigreturn_wrapper): |
528 | swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | |
529 | swi r4, r1, PTO+PT_R4; | |
ca54502b | 530 | brlid r15, sys_rt_sigreturn /* Do real work */ |
9814cc11 | 531 | addik r5, r1, PTO; /* add user context as 1st arg */ |
ca54502b MS |
532 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ |
533 | lwi r4, r1, PTO+PT_R4; | |
534 | bri ret_from_trap /* fall through will not work here due to align */ | |
535 | nop; | |
536 | ||
537 | /* | |
538 | * HW EXCEPTION routine start |
539 | */ | |
ca54502b | 540 | C_ENTRY(full_exception_trap): |
ca54502b MS |
541 | /* adjust exception address for privileged instruction |
542 | * for finding where is it */ | |
543 | addik r17, r17, -4 | |
544 | SAVE_STATE /* Save registers */ | |
06a54604 MS |
545 | /* PC, before IRQ/trap - this is one instruction above */ |
546 | swi r17, r1, PTO+PT_PC; | |
547 | tovirt(r1,r1) | |
ca54502b MS |
548 | /* FIXME this can be store directly in PT_ESR reg. |
549 | * I tested it but there is a fault */ | |
550 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ | |
b9ea77e2 | 551 | addik r15, r0, ret_from_exc - 8 |
ca54502b MS |
552 | mfs r6, resr |
553 | nop | |
554 | mfs r7, rfsr; /* save FSR */ | |
555 | nop | |
131e4e97 MS |
556 | mts rfsr, r0; /* Clear sticky fsr */ |
557 | nop | |
c318d483 | 558 | rted r0, full_exception |
9814cc11 | 559 | addik r5, r1, PTO /* parameter struct pt_regs * regs */ |
ca54502b MS |
560 | |
561 | /* | |
562 | * Unaligned data trap. | |
563 | * | |
564 | * Unaligned data trap last on 4k page is handled here. | |
565 | * | |
566 | * Trap entered via exception, so EE bit is set, and interrupts | |
567 | * are masked. This is nice, means we don't have to CLI before state save | |
568 | * | |
569 | * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S" | |
570 | */ | |
571 | C_ENTRY(unaligned_data_trap): | |
8b110d15 MS |
572 | /* MS: I have to save r11 value and then restore it because |
573 | * set_bip, clear_eip, set_ee use r11 as temp register if MSR |
574 | * instructions are not used. We don't need to do if MSR instructions | |
575 | * are used and they use r0 instead of r11. | |
576 | * I am using ENTRY_SP which should be primary used only for stack | |
577 | * pointer saving. */ | |
578 | swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | |
579 | set_bip; /* equalize initial state for all possible entries */ | |
580 | clear_eip; | |
581 | set_ee; | |
582 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | |
ca54502b | 583 | SAVE_STATE /* Save registers.*/ |
06a54604 MS |
584 | /* PC, before IRQ/trap - this is one instruction above */ |
585 | swi r17, r1, PTO+PT_PC; | |
586 | tovirt(r1,r1) | |
ca54502b | 587 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ |
b9ea77e2 | 588 | addik r15, r0, ret_from_exc-8 |
ca54502b MS |
589 | mfs r3, resr /* ESR */ |
590 | nop | |
591 | mfs r4, rear /* EAR */ | |
592 | nop | |
c318d483 | 593 | rtbd r0, _unaligned_data_exception |
b9ea77e2 | 594 | addik r7, r1, PTO /* parameter struct pt_regs * regs */ |
ca54502b MS |
595 | |
596 | /* | |
597 | * Page fault traps. | |
598 | * | |
599 | * If the real exception handler (from hw_exception_handler.S) didn't find | |
600 | * the mapping for the process, then we're thrown here to handle such situation. | |
601 | * | |
602 | * Trap entered via exceptions, so EE bit is set, and interrupts | |
603 | * are masked. This is nice, means we don't have to CLI before state save | |
604 | * | |
605 | * Build a standard exception frame for TLB Access errors. All TLB exceptions | |
606 | * will bail out to this point if they can't resolve the lightweight TLB fault. | |
607 | * | |
608 | * The C function called is in "arch/microblaze/mm/fault.c", declared as: | |
609 | * void do_page_fault(struct pt_regs *regs, | |
610 | * unsigned long address, | |
611 | * unsigned long error_code) | |
612 | */ | |
613 | /* data and instruction trap - which one is chosen is resolved in fault.c */ |
614 | C_ENTRY(page_fault_data_trap): | |
ca54502b | 615 | SAVE_STATE /* Save registers.*/ |
06a54604 MS |
616 | /* PC, before IRQ/trap - this is one instruction above */ |
617 | swi r17, r1, PTO+PT_PC; | |
618 | tovirt(r1,r1) | |
ca54502b | 619 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ |
b9ea77e2 | 620 | addik r15, r0, ret_from_exc-8 |
ca54502b MS |
621 | mfs r6, rear /* parameter unsigned long address */ |
622 | nop | |
623 | mfs r7, resr /* parameter unsigned long error_code */ | |
624 | nop | |
c318d483 | 625 | rted r0, do_page_fault |
9814cc11 | 626 | addik r5, r1, PTO /* parameter struct pt_regs * regs */ |
ca54502b MS |
627 | |
628 | C_ENTRY(page_fault_instr_trap): | |
ca54502b | 629 | SAVE_STATE /* Save registers.*/ |
06a54604 MS |
630 | /* PC, before IRQ/trap - this is one instruction above */ |
631 | swi r17, r1, PTO+PT_PC; | |
632 | tovirt(r1,r1) | |
ca54502b | 633 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ |
b9ea77e2 | 634 | addik r15, r0, ret_from_exc-8 |
ca54502b MS |
635 | mfs r6, rear /* parameter unsigned long address */ |
636 | nop | |
637 | ori r7, r0, 0 /* parameter unsigned long error_code */ | |
9814cc11 MS |
638 | rted r0, do_page_fault |
639 | addik r5, r1, PTO /* parameter struct pt_regs * regs */ | |
ca54502b MS |
640 | |
641 | /* Entry point used to return from an exception. */ | |
642 | C_ENTRY(ret_from_exc): | |
77f6d226 | 643 | lwi r11, r1, PTO + PT_MODE; |
ca54502b MS |
644 | bnei r11, 2f; /* See if returning to kernel mode, */ |
645 | /* ... if so, skip resched &c. */ | |
646 | ||
647 | /* We're returning to user mode, so check for various conditions that | |
648 | trigger rescheduling. */ | |
b1d70c62 | 649 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
ca54502b MS |
650 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
651 | andi r11, r11, _TIF_NEED_RESCHED; | |
652 | beqi r11, 5f; | |
653 | ||
654 | /* Call the scheduler before returning from a syscall/trap. */ | |
655 | bralid r15, schedule; /* Call scheduler */ | |
656 | nop; /* delay slot */ | |
657 | ||
658 | /* Maybe handle a signal */ | |
b1d70c62 | 659 | 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
ca54502b MS |
660 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
661 | andi r11, r11, _TIF_SIGPENDING; | |
662 | beqi r11, 1f; /* Signals to handle, handle them */ | |
663 | ||
664 | /* | |
665 | * Handle a signal return; Pending signals should be in r18. | |
666 | * | |
667 | * Not all registers are saved by the normal trap/interrupt entry | |
668 | * points (for instance, call-saved registers (because the normal | |
669 | * C-compiler calling sequence in the kernel makes sure they're | |
670 | * preserved), and call-clobbered registers in the case of | |
671 | * traps), but signal handlers may want to examine or change the | |
672 | * complete register state. Here we save anything not saved by | |
673 | * the normal entry sequence, so that it may be safely restored | |
36f60954 | 674 | * (in a possibly modified form) after do_signal returns. */ |
b9ea77e2 | 675 | addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ |
ca54502b MS |
676 | addi r7, r0, 0; /* Arg 3: int in_syscall */ |
677 | bralid r15, do_signal; /* Handle any signals */ | |
841d6e8c | 678 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ |
ca54502b MS |
679 | |
680 | /* Finally, return to user state. */ | |
96014cc3 | 681 | 1: set_bip; /* Ints masked for state restore */ |
8633bebc | 682 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ |
ca54502b MS |
683 | VM_OFF; |
684 | tophys(r1,r1); | |
685 | ||
ca54502b MS |
686 | RESTORE_REGS; |
687 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | |
688 | ||
689 | lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */ | |
690 | bri 6f; | |
691 | /* Return to kernel state. */ | |
96014cc3 MS |
692 | 2: set_bip; /* Ints masked for state restore */ |
693 | VM_OFF; | |
ca54502b | 694 | tophys(r1,r1); |
ca54502b MS |
695 | RESTORE_REGS; |
696 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | |
697 | ||
698 | tovirt(r1,r1); | |
699 | 6: | |
700 | EXC_return: /* Make global symbol for debugging */ | |
701 | rtbd r14, 0; /* Instructions to return from an IRQ */ | |
702 | nop; | |
703 | ||
704 | /* | |
705 | * HW EXCEPTION routine end |
706 | */ | |
707 | ||
708 | /* | |
709 | * Hardware maskable interrupts. | |
710 | * | |
711 | * The stack-pointer (r1) should have already been saved to the memory | |
712 | * location PER_CPU(ENTRY_SP). | |
713 | */ | |
714 | C_ENTRY(_interrupt): | |
715 | /* MS: we are in physical address */ | |
716 | /* Save registers, switch to proper stack, convert SP to virtual.*/ | |
717 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) | |
ca54502b | 718 | /* MS: See if already in kernel mode. */ |
653e447e | 719 | mfs r1, rmsr |
5c0d72b1 | 720 | nop |
653e447e MS |
721 | andi r1, r1, MSR_UMS |
722 | bnei r1, 1f | |
ca54502b MS |
723 | |
724 | /* Kernel-mode state save. */ | |
653e447e MS |
725 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) |
726 | tophys(r1,r1); /* MS: I have in r1 physical address where stack is */ | |
ca54502b MS |
727 | /* save registers */ |
728 | /* MS: Make room on the stack -> activation record */ | |
729 | addik r1, r1, -STATE_SAVE_SIZE; | |
ca54502b | 730 | SAVE_REGS |
ca54502b | 731 | brid 2f; |
0a6b08fd | 732 | swi r1, r1, PTO + PT_MODE; /* 0 - user mode, 1 - kernel mode */ |
ca54502b MS |
733 | 1: |
734 | /* User-mode state save. */ | |
ca54502b MS |
735 | /* MS: get the saved current */ |
736 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); | |
737 | tophys(r1,r1); | |
738 | lwi r1, r1, TS_THREAD_INFO; | |
739 | addik r1, r1, THREAD_SIZE; | |
740 | tophys(r1,r1); | |
741 | /* save registers */ | |
742 | addik r1, r1, -STATE_SAVE_SIZE; | |
ca54502b MS |
743 | SAVE_REGS |
744 | /* calculate mode */ | |
745 | swi r0, r1, PTO + PT_MODE; | |
746 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | |
747 | swi r11, r1, PTO+PT_R1; | |
80c5ff6b | 748 | clear_ums; |
ca54502b | 749 | 2: |
b1d70c62 | 750 | lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
ca54502b | 751 | tovirt(r1,r1) |
b9ea77e2 | 752 | addik r15, r0, irq_call; |
80c5ff6b MS |
753 | irq_call:rtbd r0, do_IRQ; |
754 | addik r5, r1, PTO; | |
ca54502b MS |
755 | |
756 | /* MS: we are in virtual mode */ | |
757 | ret_from_irq: | |
758 | lwi r11, r1, PTO + PT_MODE; | |
759 | bnei r11, 2f; | |
760 | ||
b1d70c62 | 761 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; |
ca54502b MS |
762 | lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */ |
763 | andi r11, r11, _TIF_NEED_RESCHED; | |
764 | beqi r11, 5f | |
765 | bralid r15, schedule; | |
766 | nop; /* delay slot */ | |
767 | ||
768 | /* Maybe handle a signal */ | |
b1d70c62 | 769 | 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */ |
ca54502b MS |
770 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
771 | andi r11, r11, _TIF_SIGPENDING; | |
772 | beqid r11, no_intr_resched | |
773 | /* Handle a signal return; Pending signals should be in r18. */ | |
774 | addi r7, r0, 0; /* Arg 3: int in_syscall */ | |
b9ea77e2 | 775 | addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ |
ca54502b MS |
776 | bralid r15, do_signal; /* Handle any signals */ |
777 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ | |
778 | ||
779 | /* Finally, return to user state. */ | |
780 | no_intr_resched: | |
781 | /* Disable interrupts, we are now committed to the state restore */ | |
782 | disable_irq | |
8633bebc | 783 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); |
ca54502b MS |
784 | VM_OFF; |
785 | tophys(r1,r1); | |
ca54502b MS |
786 | RESTORE_REGS |
787 | addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */ | |
788 | lwi r1, r1, PT_R1 - PT_SIZE; | |
789 | bri 6f; | |
790 | /* MS: Return to kernel state. */ | |
77753790 MS |
791 | 2: |
792 | #ifdef CONFIG_PREEMPT | |
b1d70c62 | 793 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; |
77753790 MS |
794 | /* MS: get preempt_count from thread info */ |
795 | lwi r5, r11, TI_PREEMPT_COUNT; | |
796 | bgti r5, restore; | |
797 | ||
798 | lwi r5, r11, TI_FLAGS; /* get flags in thread info */ | |
799 | andi r5, r5, _TIF_NEED_RESCHED; | |
800 | beqi r5, restore /* if zero jump over */ | |
801 | ||
802 | preempt: | |
803 | /* interrupts are off; that's why we call preempt_schedule_irq */ | |
804 | bralid r15, preempt_schedule_irq | |
805 | nop | |
b1d70c62 | 806 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
77753790 MS |
807 | lwi r5, r11, TI_FLAGS; /* get flags in thread info */ |
808 | andi r5, r5, _TIF_NEED_RESCHED; | |
809 | bnei r5, preempt /* if non zero jump to resched */ | |
810 | restore: | |
811 | #endif | |
812 | VM_OFF /* MS: turn off MMU */ | |
ca54502b | 813 | tophys(r1,r1) |
ca54502b MS |
814 | RESTORE_REGS |
815 | addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */ | |
816 | tovirt(r1,r1); | |
817 | 6: | |
818 | IRQ_return: /* MS: Make global symbol for debugging */ | |
819 | rtid r14, 0 | |
820 | nop | |
821 | ||
822 | /* | |
823 | * `Debug' trap | |
824 | * We enter dbtrap in "BIP" (breakpoint) mode. | |
825 | * So we exit the breakpoint mode with an 'rtbd' and proceed with the | |
826 | * original dbtrap. | |
827 | * however, wait to save state first | |
828 | */ | |
829 | C_ENTRY(_debug_exception): | |
	/*
	 * Debug/breakpoint trap handler.
	 * Entered with MSR[BIP] set, so no interrupts can occur.  Selects a
	 * kernel- or user-mode state save based on MSR[UMS], calls
	 * send_sig(SIGTRAP, current, 0), optionally reschedules and delivers
	 * pending signals, then restores the saved frame and returns.
	 */
830 | /* BIP bit is set on entry, no interrupts can occur */ | |
831 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) | |
832 | ||
653e447e | 833 | mfs r1, rmsr |
5c0d72b1 | 834 | nop |
653e447e MS |
835 | andi r1, r1, MSR_UMS |
836 | bnei r1, 1f |
ca54502b | 837 | /* Kernel-mode state save. */ |
653e447e MS |
838 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/ |
839 | tophys(r1,r1); |
ca54502b MS |
840 | |
841 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ |
ca54502b MS |
842 | SAVE_REGS; |
843 | ||
77f6d226 | 844 | swi r1, r1, PTO + PT_MODE; |
ca54502b MS |
845 | brid 2f; |
846 | nop; /* Fill delay slot */ |
847 | 1: /* User-mode state save. */ |
ca54502b MS |
848 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ |
849 | tophys(r1,r1); |
850 | lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ |
851 | addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */ |
852 | tophys(r1,r1); |
853 | ||
854 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ |
ca54502b MS |
855 | SAVE_REGS; |
856 | ||
77f6d226 | 857 | swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ |
ca54502b MS |
858 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
859 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ |
653e447e | 860 | 2: |
ca54502b MS |
861 | tovirt(r1,r1) |
862 | ||
06b28640 | 863 | set_vms; |
ca54502b MS |
864 | addi r5, r0, SIGTRAP /* send the trap signal */ |
865 | add r6, r0, CURRENT_TASK; /* Arg 2: current task_struct */ |
866 | addk r7, r0, r0 /* 3rd param zero */ |
	/* rtbd branches to send_sig and clears BIP; the delay slot sets r15
	 * so send_sig's return lands back at dbtrap_call. */
06b28640 | 867 | dbtrap_call: rtbd r0, send_sig; |
b9ea77e2 | 868 | addik r15, r0, dbtrap_call; |
ca54502b MS |
869 | |
870 | set_bip; /* Ints masked for state restore*/ |
77f6d226 | 871 | lwi r11, r1, PTO + PT_MODE; |
ca54502b MS |
872 | bnei r11, 2f; |
873 | ||
874 | /* Get thread info pointer into r11 */ |
b1d70c62 | 875 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
ca54502b MS |
876 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
877 | andi r11, r11, _TIF_NEED_RESCHED; |
878 | beqi r11, 5f; |
879 | ||
880 | /* Call the scheduler before returning from a syscall/trap. */ |
881 | ||
882 | bralid r15, schedule; /* Call scheduler */ |
883 | nop; /* delay slot */ |
884 | /* XXX Is PT_DTRACE handling needed here? */ |
885 | /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */ |
886 | ||
887 | /* Maybe handle a signal */ |
b1d70c62 | 888 | 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
ca54502b MS |
889 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
890 | andi r11, r11, _TIF_SIGPENDING; |
891 | beqi r11, 1f; /* Signals to handle, handle them */ |
892 | ||
893 | /* Handle a signal return; Pending signals should be in r18. */ |
894 | /* Not all registers are saved by the normal trap/interrupt entry |
895 | points (for instance, call-saved registers (because the normal |
896 | C-compiler calling sequence in the kernel makes sure they're |
897 | preserved), and call-clobbered registers in the case of |
898 | traps), but signal handlers may want to examine or change the |
899 | complete register state. Here we save anything not saved by |
900 | the normal entry sequence, so that it may be safely restored |
901 | (in a possibly modified form) after do_signal returns. */ |
902 | ||
b9ea77e2 | 903 | addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ |
ca54502b MS |
904 | addi r7, r0, 0; /* Arg 3: int in_syscall */ |
905 | bralid r15, do_signal; /* Handle any signals */ |
841d6e8c | 906 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ |
ca54502b MS |
907 | |
908 | ||
909 | /* Finally, return to user state. */ |
5c0d72b1 | 910 | 1: |
8633bebc | 911 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ |
ca54502b MS |
912 | VM_OFF; |
913 | tophys(r1,r1); |
914 | ||
ca54502b MS |
915 | RESTORE_REGS |
916 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ |
917 | ||
918 | ||
919 | lwi r1, r1, PT_R1 - PT_SIZE; |
920 | /* Restore user stack pointer. */ |
921 | bri 6f; |
922 | ||
923 | /* Return to kernel state. */ |
924 | 2: VM_OFF; |
925 | tophys(r1,r1); |
ca54502b MS |
926 | RESTORE_REGS |
927 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ |
928 | ||
929 | tovirt(r1,r1); |
930 | 6: |
931 | DBTRAP_return: /* Make global symbol for debugging */ |
932 | rtbd r14, 0; /* Return from the debug trap */ |
933 | nop; |
934 | ||
935 | ||
936 | ||
937 | ENTRY(_switch_to) | |
	/*
	 * Context switch.
	 * r5 = previous task's thread_info, r6 = next task's thread_info
	 * (both are addressed with TI_* offsets below -- confirm at callers).
	 * Returns the outgoing CURRENT_TASK pointer in r3.
	 * Saves the dedicated and non-volatile registers plus MSR/EAR/ESR/FSR
	 * into prev's cpu_context, switches CURRENT_TASK (r31) and
	 * PER_CPU(CURRENT_SAVE) to the next task, restores next's
	 * cpu_context and returns through next's saved r15.
	 */
938 | /* prepare return value */ | |
b1d70c62 | 939 | addk r3, r0, CURRENT_TASK |
ca54502b MS |
940 | |
941 | /* save registers in cpu_context */ |
942 | /* use r11 and r12, volatile registers, as temp register */ |
943 | /* give start of cpu_context for previous process */ |
944 | addik r11, r5, TI_CPU_CONTEXT |
945 | swi r1, r11, CC_R1 |
946 | swi r2, r11, CC_R2 |
947 | /* skip volatile registers. |
948 | * they are saved on stack when we jumped to _switch_to() */ |
949 | /* dedicated registers */ |
950 | swi r13, r11, CC_R13 |
951 | swi r14, r11, CC_R14 |
952 | swi r15, r11, CC_R15 |
953 | swi r16, r11, CC_R16 |
954 | swi r17, r11, CC_R17 |
955 | swi r18, r11, CC_R18 |
956 | /* save non-volatile registers */ |
957 | swi r19, r11, CC_R19 |
958 | swi r20, r11, CC_R20 |
959 | swi r21, r11, CC_R21 |
960 | swi r22, r11, CC_R22 |
961 | swi r23, r11, CC_R23 |
962 | swi r24, r11, CC_R24 |
963 | swi r25, r11, CC_R25 |
964 | swi r26, r11, CC_R26 |
965 | swi r27, r11, CC_R27 |
966 | swi r28, r11, CC_R28 |
967 | swi r29, r11, CC_R29 |
968 | swi r30, r11, CC_R30 |
969 | /* special purpose registers */ |
970 | mfs r12, rmsr |
971 | nop |
972 | swi r12, r11, CC_MSR |
973 | mfs r12, rear |
974 | nop |
975 | swi r12, r11, CC_EAR |
976 | mfs r12, resr |
977 | nop |
978 | swi r12, r11, CC_ESR |
979 | mfs r12, rfsr |
980 | nop |
981 | swi r12, r11, CC_FSR |
982 | ||
b1d70c62 MS |
983 | /* update r31 (CURRENT_TASK) to point at the task that runs next */ |
984 | lwi CURRENT_TASK, r6, TI_TASK |
ca54502b | 985 | /* store it to current_save too */ |
b1d70c62 | 986 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE) |
ca54502b MS |
987 | |
988 | /* get new process' cpu context and restore */ |
989 | /* r11 = start of the next task's cpu_context */ |
990 | addik r11, r6, TI_CPU_CONTEXT |
991 | ||
992 | /* non-volatile registers */ |
993 | lwi r30, r11, CC_R30 |
994 | lwi r29, r11, CC_R29 |
995 | lwi r28, r11, CC_R28 |
996 | lwi r27, r11, CC_R27 |
997 | lwi r26, r11, CC_R26 |
998 | lwi r25, r11, CC_R25 |
999 | lwi r24, r11, CC_R24 |
1000 | lwi r23, r11, CC_R23 |
1001 | lwi r22, r11, CC_R22 |
1002 | lwi r21, r11, CC_R21 |
1003 | lwi r20, r11, CC_R20 |
1004 | lwi r19, r11, CC_R19 |
1005 | /* dedicated registers */ |
1006 | lwi r18, r11, CC_R18 |
1007 | lwi r17, r11, CC_R17 |
1008 | lwi r16, r11, CC_R16 |
1009 | lwi r15, r11, CC_R15 |
1010 | lwi r14, r11, CC_R14 |
1011 | lwi r13, r11, CC_R13 |
1012 | /* skip volatile registers */ |
1013 | lwi r2, r11, CC_R2 |
1014 | lwi r1, r11, CC_R1 |
1015 | ||
1016 | /* special purpose registers */ |
1017 | lwi r12, r11, CC_FSR |
1018 | mts rfsr, r12 |
1019 | nop |
1020 | lwi r12, r11, CC_MSR |
1021 | mts rmsr, r12 |
1022 | nop |
1023 | ||
1024 | rtsd r15, 8 |
1025 | nop |
1026 | ||
1027 | ENTRY(_reset) | |
	/* Soft-reset entry: absolute branch to the FS-BOOT entry at 0x70. */
1028 | brai 0x70; /* Jump back to FS-boot */ |
1029 | ||
1030 | ENTRY(_break) | |
	/*
	 * Break/NMI trap: dump MSR and ESR into fixed debug slots
	 * (offsets 0x250/0x254 of r0_ram) for post-mortem inspection,
	 * then hang -- 'bri 0' branches to itself forever.
	 */
1031 | mfs r5, rmsr |
1032 | nop |
1033 | swi r5, r0, 0x250 + TOPHYS(r0_ram) |
1034 | mfs r5, resr |
1035 | nop |
1036 | swi r5, r0, 0x254 + TOPHYS(r0_ram) |
1037 | bri 0 |
1038 | ||
1039 | /* These are compiled and loaded into high memory, then |
1040 | * copied into place in mach_early_setup */ |
1041 | .section .init.ivt, "ax" |
1042 | .org 0x0 |
1043 | /* this is very important - here is the reset vector */ |
1044 | /* in current MMU branch you don't care what is here - it is |
1045 | * used from the bootloader side - but this is correct for FS-BOOT */ |
1046 | brai 0x70 |
1047 | nop |
1048 | brai TOPHYS(_user_exception); /* syscall handler */ |
1049 | brai TOPHYS(_interrupt); /* Interrupt handler */ |
1050 | brai TOPHYS(_break); /* nmi trap handler */ |
1051 | brai TOPHYS(_hw_exception_handler); /* HW exception handler */ |
1052 | ||
	/* 0x60 is the debug/break vector offset -- TODO confirm against the
	 * configured MicroBlaze core's vector map */
1053 | .org 0x60 |
1054 | brai TOPHYS(_debug_exception); /* debug trap handler */ |
1055 | ||
1056 | .section .rodata,"a" |
1057 | #include "syscall_table.S" |
1058 | ||
	/* Size of sys_call_table (defined by syscall_table.S above). */
1059 | syscall_table_size=(.-sys_call_table) |
1060 | ||
	/* NUL-terminated labels displayed by the stack unwinder (see the
	 * microblaze_trap_handlers table below). */
ce3266c0 SM |
1061 | type_SYSCALL: |
1062 | .ascii "SYSCALL\0" |
1063 | type_IRQ: |
1064 | .ascii "IRQ\0" |
1065 | type_IRQ_PREEMPT: |
1066 | .ascii "IRQ (PREEMPTED)\0" |
1067 | type_SYSCALL_PREEMPT: |
	/* NOTE(review): the leading space in this string looks unintentional
	 * vs. the other type_* strings -- confirm the unwinder's expected
	 * formatting before changing it. */
1068 | .ascii " SYSCALL (PREEMPTED)\0" |
1069 | ||
1070 | /* |
1071 | * Trap decoding for stack unwinder |
1072 | * Tuples are (start addr, end addr, string) |
1073 | * If return address lies in [start addr, end addr], |
1074 | * unwinder displays 'string' |
1075 | */ |
1076 | ||
1077 | .align 4 |
1078 | .global microblaze_trap_handlers |
1079 | microblaze_trap_handlers: |
1080 | /* Exact matches come first */ |
1081 | .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL |
1082 | .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ |
1083 | /* Fuzzy matches go here */ |
1084 | .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT |
1085 | .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT |
1086 | /* End of table */ |
1087 | .word 0 ; .word 0 ; .word 0 |