/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unistd32.h>

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
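
/*
 * The frame built by kernel_entry mirrors struct pt_regs (asm/ptrace.h): the
 * x0-x30 general-purpose registers followed by sp, pc, pstate, orig_x0 and
 * syscallno. The S_* offsets and S_FRAME_SIZE used here are generated from
 * that structure by asm-offsets; kernel_exit below unwinds the same frame
 * and returns with eret.
 */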

	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to kernel
	.endm

	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~((1 << 13) - 1)	// top of 8K stack
	.endm
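
/*
 * get_thread_info relies on the kernel stack being THREAD_SIZE (8K here)
 * aligned, with struct thread_info at its lowest address: clearing the
 * bottom 13 bits of the current SP therefore yields the thread_info pointer
 * of the running task.
 */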

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
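
/*
 * x25-x28 are callee-saved in the AArch64 procedure call standard, so the
 * values cached in sc_nr/scno/stbl/tsk survive the C functions called from
 * the syscall and exception paths below.
 */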

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm
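
/*
 * handle_arch_irq (the .quad at the end of this file) holds a pointer to the
 * root interrupt handler, installed by the interrupt controller driver during
 * early IRQ setup; it is called here with the saved pt_regs in x0.
 */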

	.text

/*
 * Exception vectors.
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm
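
/*
 * Each vector entry is 128 bytes apart (hence the .align 7 in ventry) and the
 * base of the table must be 2KB aligned (the .align 11 below) so that it can
 * be programmed into VBAR_EL1.
 */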
	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #26			// exception class
	cmp	x24, #0x25			// data abort in EL1
	b.eq	el1_da
	cmp	x24, #0x18			// configurable trap
	b.eq	el1_undef
	cmp	x24, #0x26			// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #0x22			// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #0x00			// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #0x30			// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg_if_not_stepping x2
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception

	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg_if_not_stepping x0
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
	add	x0, x24, #1			// increment it
	str	x0, [tsk, #TI_PREEMPT]
#endif
	irq_handler
#ifdef CONFIG_PREEMPT
	str	x24, [tsk, #TI_PREEMPT]		// restore preempt count
	cbnz	x24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	enable_dbg
	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new tasks TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif
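
/*
 * el1_preempt stashes the caller's lr in x24 (a callee-saved register)
 * because the bl to preempt_schedule_irq clobbers lr, and keeps rescheduling
 * for as long as TIF_NEED_RESCHED is still set on return.
 */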

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #26			// exception class
	cmp	x24, #0x15			// SVC in 64-bit state
	b.eq	el0_svc
	adr	lr, ret_from_exception
	cmp	x24, #0x24			// data abort in EL0
	b.eq	el0_da
	cmp	x24, #0x20			// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #0x07			// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #0x2c			// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #0x18			// configurable trap
	b.eq	el0_undef
	cmp	x24, #0x26			// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #0x22			// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #0x00			// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #0x30			// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #26			// exception class
	cmp	x24, #0x11			// SVC in 32-bit state
	b.eq	el0_svc_compat
	adr	lr, ret_from_exception
	cmp	x24, #0x24			// data abort in EL0
	b.eq	el0_da
	cmp	x24, #0x20			// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #0x07			// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #0x28			// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #0x00			// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #0x30			// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_mem_abort
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	b	do_mem_abort
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_acc
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_exc
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el0_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	b	do_undefinstr
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	disable_step x1
	mov	x1, x25
	mov	x2, sp
	b	do_debug_exception
el0_inv:
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	disable_step x1
	isb
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
	add	x23, x24, #1			// increment it
	str	x23, [tsk, #TI_PREEMPT]
#endif
	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	x0, [tsk, #TI_PREEMPT]
	str	x24, [tsk, #TI_PREEMPT]
	cmp	x0, x23
	b.eq	1f
	mov	x1, #0
	str	x1, [x1]			// BUG
1:
#endif
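	// If the preempt count seen after irq_handler no longer matches the
	// value written before it, an interrupt handler has left it unbalanced;
	// the store through the NULL pointer in x1 above forces an immediate
	// fault so the imbalance is caught right away.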
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * This is the return code to user mode for abort handlers
 */
ret_from_exception:
	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_exception)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)
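
/*
 * The area at THREAD_CPU_CONTEXT inside the task's thread_struct is the
 * cpu_context record (x19-x28, fp, sp, pc). Since lr is saved as "pc" and
 * reloaded before the final ret, the next task resumes either where it last
 * called cpu_switch_to or, for a freshly forked task, at ret_from_fork.
 */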

/*
 * This is the fast syscall return path. We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	tbz	x1, #TIF_SINGLESTEP, fast_exit
	disable_dbg
	enable_step x2
fast_exit:
	kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	enable_dbg
	bl	schedule

/*
 * "slow" syscall return path.
 */
ENTRY(ret_to_user)
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	tbz	x1, #TIF_SINGLESTEP, no_work_pending
	disable_dbg
	enable_step x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	disable_step x16
	isb
	enable_dbg
	enable_irq

	get_thread_info tsk
	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall tracing
	tbnz	x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
	adr	lr, ret_fast_syscall		// return address
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)
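
/*
 * The original x0 and the syscall number are written into pt_regs (orig_x0,
 * syscallno) above so that syscall restart and the ptrace/tracing paths can
 * inspect or rewrite them; each table entry is a 64-bit pointer, hence the
 * scno, lsl #3 index.
 */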

/*
 * This is the really slow path. We're going to be doing context
 * switches, and waiting for our parent to respond.
 */
__sys_trace:
	mov	x1, sp
	mov	w0, #0				// trace entry
	bl	syscall_trace
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x1, sp
	mov	w0, #1				// trace exit
	bl	syscall_trace
	b	ret_to_user

/*
 * Special system call wrappers.
 */
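/*
 * These wrappers exist because the C handlers below need values that the
 * normal syscall argument registers do not carry: a pointer to the saved
 * pt_regs (execve, clone, rt_sigreturn) or the saved user stack pointer
 * (sigaltstack). Each wrapper loads that extra argument before branching to
 * the real implementation.
 */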
ENTRY(sys_execve_wrapper)
	mov	x3, sp
	b	sys_execve
ENDPROC(sys_execve_wrapper)

ENTRY(sys_clone_wrapper)
	mov	x5, sp
	b	sys_clone
ENDPROC(sys_clone_wrapper)

ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(sys_sigaltstack_wrapper)
	ldr	x2, [sp, #S_SP]
	b	sys_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

ENTRY(handle_arch_irq)
	.quad	0