arch/arm64/kernel/entry.S

/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unistd32.h>

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

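/*
 * For reference, kernel_entry lays the saved context out as a struct
 * pt_regs on the kernel stack. A rough sketch of the layout behind the
 * S_* offsets used above (field order per the 3.x-era definition in
 * <asm/ptrace.h>; the exact offsets come from asm-offsets.c):
 *
 *	struct pt_regs {
 *		u64	regs[31];	// S_X0 .. S_LR (x0-x30)
 *		u64	sp;		// S_SP - aborted SP
 *		u64	pc;		// S_PC - aborted PC (ELR_EL1)
 *		u64	pstate;		// S_PSTATE - aborted PSTATE (SPSR_EL1)
 *		u64	orig_x0;	// S_ORIG_X0 - original syscall arg 0
 *		u64	syscallno;	// S_SYSCALLNO - -1 if not a syscall
 *	};
 */
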
	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to kernel
	.endm

	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

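/*
 * For reference (a sketch of the syscall ABI handled further down, not
 * defined by this file): a 64-bit task issues "svc #0" with its
 * arguments in x0 onwards and the syscall number in w8, which el0_svc
 * copies into scno; a 32-bit (compat) task passes the number in r7,
 * read as w7 by el0_svc_compat.
 */
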
/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm

	.text

/*
 * Exception vectors.
 */

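/*
 * The vector table must be 2KB-aligned (hence .align 11 below) so that
 * it can be installed in VBAR_EL1, and each ventry slot is 128 bytes
 * wide.  The four groups of four entries are, in architectural order:
 * current EL using SP_EL0, current EL using SP_ELx, lower EL in
 * AArch64, and lower EL in AArch32.
 */
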
	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
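/*
 * el1_sync (and el0_sync further down) dispatch on the exception class
 * (EC) field of ESR_EL1, extracted by the "lsr ..., #ESR_EL1_EC_SHIFT"
 * below; the ESR_EL1_EC_* constants come from <asm/esr.h>.
 */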
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_DABT_EL1	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL1	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg_if_not_stepping x2
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_EL1_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception

	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg_if_not_stepping x0
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	w24, [tsk, #TI_PREEMPT]		// restore preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	enable_dbg
	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	adr	lr, ret_from_exception
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	adr	lr, ret_from_exception
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
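	// Clear the tag (top byte) from the fault address: with TBI,
	// EL0 pointers may carry a tag in bits [63:56].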
	bic	x0, x0, #(0xff << 56)
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_mem_abort
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	b	do_mem_abort
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_acc
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_exc
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el0_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	// enable interrupts before calling the main handler
	enable_irq
	b	do_undefinstr
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	disable_step x1
	mov	x1, x25
	mov	x2, sp
	b	do_debug_exception
el0_inv:
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	disable_step x1
	isb
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler
	get_thread_info tsk

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * This is the return code to user mode for abort handlers
 */
ret_from_exception:
	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_exception)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
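/*
 * THREAD_CPU_CONTEXT is the offset of the cpu_context area inside
 * task_struct->thread; a sketch of the layout the stores and loads
 * below assume (see struct cpu_context in <asm/processor.h>):
 *
 *	x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 fp(x29) sp pc(lr)
 */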
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
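/*
 * Note that kernel_exit is invoked with ret = 1 below, so x0 is not
 * reloaded from the stack: the syscall return value placed in x0 by the
 * sys_* routine is handed back to userspace as is.
 */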
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	tbz	x1, #TIF_SINGLESTEP, fast_exit
	disable_dbg
	enable_step x2
fast_exit:
	kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	enable_dbg
	bl	schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	tbz	x1, #TIF_SINGLESTEP, no_work_pending
	disable_dbg
	enable_step x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
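/*
 * For a kernel thread, copy_thread() leaves the thread function in x19
 * and its argument in x20 (both restored by cpu_switch_to above); for a
 * user task x19 is zero and we fall straight through to ret_to_user.
 */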
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	disable_step x16
	isb
	enable_dbg
	enable_irq

	get_thread_info tsk
	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall tracing
	tbnz	x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
	adr	lr, ret_fast_syscall		// return address
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	x1, sp
	mov	w0, #0				// trace entry
	bl	syscall_trace
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x1, sp
	mov	w0, #1				// trace exit
	bl	syscall_trace
	b	ret_to_user

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

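/*
 * Storage for the IRQ dispatch function pointer read by the irq_handler
 * macro above; it stays zero until the interrupt controller driver
 * installs its handler during boot.
 */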
ENTRY(handle_arch_irq)
	.quad	0