| 1 | /* |
| 2 | * linux/arch/arm/kernel/entry-armv.S |
| 3 | * |
| 4 | * Copyright (C) 1996,1997,1998 Russell King. |
| 5 | * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk) |
| 6 | * nommu support by Hyok S. Choi (hyok.choi@samsung.com) |
| 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License version 2 as |
| 10 | * published by the Free Software Foundation. |
| 11 | * |
| 12 | * Low-level vector interface routines |
| 13 | * |
| 14 | * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction |
| 15 | * that causes it to save wrong values... Be aware! |
| 16 | */ |
| 17 | |
| 18 | #include <linux/init.h> |
| 19 | |
| 20 | #include <asm/assembler.h> |
| 21 | #include <asm/memory.h> |
| 22 | #include <asm/glue-df.h> |
| 23 | #include <asm/glue-pf.h> |
| 24 | #include <asm/vfpmacros.h> |
| 25 | #ifndef CONFIG_MULTI_IRQ_HANDLER |
| 26 | #include <mach/entry-macro.S> |
| 27 | #endif |
| 28 | #include <asm/thread_notify.h> |
| 29 | #include <asm/unwind.h> |
| 30 | #include <asm/unistd.h> |
| 31 | #include <asm/tls.h> |
| 32 | #include <asm/system_info.h> |
| 33 | |
| 34 | #include "entry-header.S" |
| 35 | #include <asm/entry-macro-multi.S> |
| 36 | #include <asm/probes.h> |
| 37 | |
| 38 | /* |
| 39 | * Interrupt handling. |
| 40 | */ |
| 41 | .macro irq_handler |
| 42 | #ifdef CONFIG_MULTI_IRQ_HANDLER |
| 43 | ldr r1, =handle_arch_irq |
| 44 | mov r0, sp |
| 45 | badr lr, 9997f |
| 46 | ldr pc, [r1] |
| 47 | #else |
| 48 | arch_irq_handler_default |
| 49 | #endif |
| 50 | 9997: |
| 51 | .endm |
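@ In irq_handler above, 9997 is the common return point: the
@ MULTI_IRQ_HANDLER path returns to it through the lr set up with
@ badr, and arch_irq_handler_default falls through to it.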
| 52 | |
| 53 | .macro pabt_helper |
| 54 | @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5 |
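@ In the MULTI_PABORT case the "mov lr, pc" / "ldr pc" pair below
@ acts as an indirect call: reading pc in ARM mode yields the
@ current instruction + 8, so lr points just past the ldr.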
| 55 | #ifdef MULTI_PABORT |
| 56 | ldr ip, .LCprocfns |
| 57 | mov lr, pc |
| 58 | ldr pc, [ip, #PROCESSOR_PABT_FUNC] |
| 59 | #else |
| 60 | bl CPU_PABORT_HANDLER |
| 61 | #endif |
| 62 | .endm |
| 63 | |
| 64 | .macro dabt_helper |
| 65 | |
| 66 | @ |
| 67 | @ Call the processor-specific abort handler: |
| 68 | @ |
| 69 | @ r2 - pt_regs |
| 70 | @ r4 - aborted context pc |
| 71 | @ r5 - aborted context psr |
| 72 | @ |
| 73 | @ The abort handler must return the aborted address in r0, and |
| 74 | @ the fault status register in r1. r9 must be preserved. |
| 75 | @ |
| 76 | #ifdef MULTI_DABORT |
| 77 | ldr ip, .LCprocfns |
| 78 | mov lr, pc |
| 79 | ldr pc, [ip, #PROCESSOR_DABT_FUNC] |
| 80 | #else |
| 81 | bl CPU_DABORT_HANDLER |
| 82 | #endif |
| 83 | .endm |
| 84 | |
| 85 | #ifdef CONFIG_KPROBES |
| 86 | .section .kprobes.text,"ax",%progbits |
| 87 | #else |
| 88 | .text |
| 89 | #endif |
| 90 | |
| 91 | /* |
| 92 | * Invalid mode handlers |
| 93 | */ |
| 94 | .macro inv_entry, reason |
| 95 | sub sp, sp, #PT_REGS_SIZE |
| 96 | ARM( stmib sp, {r1 - lr} ) |
| 97 | THUMB( stmia sp, {r0 - r12} ) |
| 98 | THUMB( str sp, [sp, #S_SP] ) |
| 99 | THUMB( str lr, [sp, #S_LR] ) |
| 100 | mov r1, #\reason |
| 101 | .endm |
| 102 | |
| 103 | __pabt_invalid: |
| 104 | inv_entry BAD_PREFETCH |
| 105 | b common_invalid |
| 106 | ENDPROC(__pabt_invalid) |
| 107 | |
| 108 | __dabt_invalid: |
| 109 | inv_entry BAD_DATA |
| 110 | b common_invalid |
| 111 | ENDPROC(__dabt_invalid) |
| 112 | |
| 113 | __irq_invalid: |
| 114 | inv_entry BAD_IRQ |
| 115 | b common_invalid |
| 116 | ENDPROC(__irq_invalid) |
| 117 | |
| 118 | __und_invalid: |
| 119 | inv_entry BAD_UNDEFINSTR |
| 120 | |
| 121 | @ |
| 122 | @ XXX fall through to common_invalid |
| 123 | @ |
| 124 | |
| 125 | @ |
| 126 | @ common_invalid - generic code for failed exception (re-entrant version of handlers) |
| 127 | @ |
| 128 | common_invalid: |
| 129 | zero_fp |
| 130 | |
| 131 | ldmia r0, {r4 - r6} |
| 132 | add r0, sp, #S_PC @ here for interlock avoidance |
| 133 | mov r7, #-1 @ "" "" "" "" |
| 134 | str r4, [sp] @ save preserved r0 |
| 135 | stmia r0, {r5 - r7} @ lr_<exception>, |
| 136 | @ cpsr_<exception>, "old_r0" |
| 137 | |
| 138 | mov r0, sp |
| 139 | b bad_mode |
| 140 | ENDPROC(__und_invalid) |
| 141 | |
| 142 | /* |
| 143 | * SVC mode handlers |
| 144 | */ |
| 145 | |
| 146 | #if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) |
| 147 | #define SPFIX(code...) code |
| 148 | #else |
| 149 | #define SPFIX(code...) |
| 150 | #endif |
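/*
 * SPFIX() emits its argument only when EABI's 8-byte stack alignment
 * must be preserved: the tst/subeq pair in svc_entry pads the frame
 * when sp is only 4-byte aligned, and the saved sp_svc is computed
 * with the matching correction so the pad disappears on return.
 */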
| 151 | |
| 152 | .macro svc_entry, stack_hole=0, trace=1, uaccess=1 |
| 153 | UNWIND(.fnstart ) |
| 154 | UNWIND(.save {r0 - pc} ) |
| 155 | sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4) |
| 156 | #ifdef CONFIG_THUMB2_KERNEL |
| 157 | SPFIX( str r0, [sp] ) @ temporarily saved |
| 158 | SPFIX( mov r0, sp ) |
| 159 | SPFIX( tst r0, #4 ) @ test original stack alignment |
| 160 | SPFIX( ldr r0, [sp] ) @ restored |
| 161 | #else |
| 162 | SPFIX( tst sp, #4 ) |
| 163 | #endif |
| 164 | SPFIX( subeq sp, sp, #4 ) |
| 165 | stmia sp, {r1 - r12} |
| 166 | |
| 167 | ldmia r0, {r3 - r5} |
| 168 | add r7, sp, #S_SP - 4 @ here for interlock avoidance |
| 169 | mov r6, #-1 @ "" "" "" "" |
| 170 | add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4) |
| 171 | SPFIX( addeq r2, r2, #4 ) |
| 172 | str r3, [sp, #-4]! @ save the "real" r0 copied |
| 173 | @ from the exception stack |
| 174 | |
| 175 | mov r3, lr |
| 176 | |
| 177 | @ |
| 178 | @ We are now ready to fill in the remaining blanks on the stack: |
| 179 | @ |
| 180 | @ r2 - sp_svc |
| 181 | @ r3 - lr_svc |
| 182 | @ r4 - lr_<exception>, already fixed up for correct return/restart |
| 183 | @ r5 - spsr_<exception> |
| 184 | @ r6 - orig_r0 (see pt_regs definition in ptrace.h) |
| 185 | @ |
| 186 | stmia r7, {r2 - r6} |
| 187 | |
| 188 | get_thread_info tsk |
| 189 | ldr r0, [tsk, #TI_ADDR_LIMIT] |
| 190 | mov r1, #TASK_SIZE |
| 191 | str r1, [tsk, #TI_ADDR_LIMIT] |
| 192 | str r0, [sp, #SVC_ADDR_LIMIT] |
| 193 | |
| 194 | uaccess_save r0 |
| 195 | .if \uaccess |
| 196 | uaccess_disable r0 |
| 197 | .endif |
| 198 | |
| 199 | .if \trace |
| 200 | #ifdef CONFIG_TRACE_IRQFLAGS |
| 201 | bl trace_hardirqs_off |
| 202 | #endif |
| 203 | .endif |
| 204 | .endm |
| 205 | |
| 206 | .align 5 |
| 207 | __dabt_svc: |
| 208 | svc_entry uaccess=0 |
| 209 | mov r2, sp |
| 210 | dabt_helper |
| 211 | THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR |
| 212 | svc_exit r5 @ return from exception |
| 213 | UNWIND(.fnend ) |
| 214 | ENDPROC(__dabt_svc) |
| 215 | |
| 216 | .align 5 |
| 217 | __irq_svc: |
| 218 | svc_entry |
| 219 | irq_handler |
| 220 | |
| 221 | #ifdef CONFIG_PREEMPT |
| 222 | ldr r8, [tsk, #TI_PREEMPT] @ get preempt count |
| 223 | ldr r0, [tsk, #TI_FLAGS] @ get flags |
| 224 | teq r8, #0 @ if preempt count != 0 |
| 225 | movne r0, #0 @ force flags to 0 |
| 226 | tst r0, #_TIF_NEED_RESCHED |
| 227 | blne svc_preempt |
| 228 | #endif |
| 229 | |
| 230 | svc_exit r5, irq = 1 @ return from exception |
| 231 | UNWIND(.fnend ) |
| 232 | ENDPROC(__irq_svc) |
| 233 | |
| 234 | .ltorg |
| 235 | |
| 236 | #ifdef CONFIG_PREEMPT |
| 237 | svc_preempt: |
| 238 | mov r8, lr |
| 239 | 1: bl preempt_schedule_irq @ irq en/disable is done inside |
ldr r0, [tsk, #TI_FLAGS] @ get new task's TI_FLAGS
| 241 | tst r0, #_TIF_NEED_RESCHED |
| 242 | reteq r8 @ go again |
| 243 | b 1b |
| 244 | #endif |
| 245 | |
| 246 | __und_fault: |
| 247 | @ Correct the PC such that it is pointing at the instruction |
| 248 | @ which caused the fault. If the faulting instruction was ARM |
@ the PC will be pointing at the next instruction, and we have
@ to subtract 4. Otherwise, it is Thumb, and the PC will be
| 251 | @ pointing at the second half of the Thumb instruction. We |
| 252 | @ have to subtract 2. |
| 253 | ldr r2, [r0, #S_PC] |
| 254 | sub r2, r2, r1 |
| 255 | str r2, [r0, #S_PC] |
| 256 | b do_undefinstr |
| 257 | ENDPROC(__und_fault) |
| 258 | |
| 259 | .align 5 |
| 260 | __und_svc: |
| 261 | #ifdef CONFIG_KPROBES |
@ If a kprobe is about to simulate a "stmdb sp..." instruction,
@ it needs free stack space, which will then belong to the
@ saved context.
| 265 | svc_entry MAX_STACK_SIZE |
| 266 | #else |
| 267 | svc_entry |
| 268 | #endif |
| 269 | @ |
| 270 | @ call emulation code, which returns using r9 if it has emulated |
| 271 | @ the instruction, or the more conventional lr if we are to treat |
| 272 | @ this as a real undefined instruction |
| 273 | @ |
| 274 | @ r0 - instruction |
| 275 | @ |
| 276 | #ifndef CONFIG_THUMB2_KERNEL |
| 277 | ldr r0, [r4, #-4] |
| 278 | #else |
| 279 | mov r1, #2 |
| 280 | ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2 |
cmp r0, #0xe800 @ 32-bit instruction if first half >= 0xe800
| 282 | blo __und_svc_fault |
| 283 | ldrh r9, [r4] @ bottom 16 bits |
| 284 | add r4, r4, #2 |
| 285 | str r4, [sp, #S_PC] |
| 286 | orr r0, r9, r0, lsl #16 |
| 287 | #endif |
| 288 | badr r9, __und_svc_finish |
| 289 | mov r2, r4 |
| 290 | bl call_fpe |
| 291 | |
| 292 | mov r1, #4 @ PC correction to apply |
| 293 | __und_svc_fault: |
| 294 | mov r0, sp @ struct pt_regs *regs |
| 295 | bl __und_fault |
| 296 | |
| 297 | __und_svc_finish: |
| 298 | get_thread_info tsk |
| 299 | ldr r5, [sp, #S_PSR] @ Get SVC cpsr |
| 300 | svc_exit r5 @ return from exception |
| 301 | UNWIND(.fnend ) |
| 302 | ENDPROC(__und_svc) |
| 303 | |
| 304 | .align 5 |
| 305 | __pabt_svc: |
| 306 | svc_entry |
| 307 | mov r2, sp @ regs |
| 308 | pabt_helper |
| 309 | svc_exit r5 @ return from exception |
| 310 | UNWIND(.fnend ) |
| 311 | ENDPROC(__pabt_svc) |
| 312 | |
| 313 | .align 5 |
| 314 | __fiq_svc: |
| 315 | svc_entry trace=0 |
| 316 | mov r0, sp @ struct pt_regs *regs |
| 317 | bl handle_fiq_as_nmi |
| 318 | svc_exit_via_fiq |
| 319 | UNWIND(.fnend ) |
| 320 | ENDPROC(__fiq_svc) |
| 321 | |
| 322 | .align 5 |
| 323 | .LCcralign: |
| 324 | .word cr_alignment |
| 325 | #ifdef MULTI_DABORT |
| 326 | .LCprocfns: |
| 327 | .word processor |
| 328 | #endif |
| 329 | .LCfp: |
| 330 | .word fp_enter |
| 331 | |
| 332 | /* |
| 333 | * Abort mode handlers |
| 334 | */ |
| 335 | |
| 336 | @ |
| 337 | @ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode |
| 338 | @ and reuses the same macros. However in abort mode we must also |
| 339 | @ save/restore lr_abt and spsr_abt to make nested aborts safe. |
| 340 | @ |
| 341 | .align 5 |
| 342 | __fiq_abt: |
| 343 | svc_entry trace=0 |
| 344 | |
| 345 | ARM( msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT ) |
| 346 | THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT ) |
| 347 | THUMB( msr cpsr_c, r0 ) |
| 348 | mov r1, lr @ Save lr_abt |
| 349 | mrs r2, spsr @ Save spsr_abt, abort is now safe |
| 350 | ARM( msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT ) |
| 351 | THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT ) |
| 352 | THUMB( msr cpsr_c, r0 ) |
| 353 | stmfd sp!, {r1 - r2} |
| 354 | |
| 355 | add r0, sp, #8 @ struct pt_regs *regs |
| 356 | bl handle_fiq_as_nmi |
| 357 | |
| 358 | ldmfd sp!, {r1 - r2} |
| 359 | ARM( msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT ) |
| 360 | THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT ) |
| 361 | THUMB( msr cpsr_c, r0 ) |
| 362 | mov lr, r1 @ Restore lr_abt, abort is unsafe |
| 363 | msr spsr_cxsf, r2 @ Restore spsr_abt |
| 364 | ARM( msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT ) |
| 365 | THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT ) |
| 366 | THUMB( msr cpsr_c, r0 ) |
| 367 | |
| 368 | svc_exit_via_fiq |
| 369 | UNWIND(.fnend ) |
| 370 | ENDPROC(__fiq_abt) |
| 371 | |
| 372 | /* |
| 373 | * User mode handlers |
| 374 | * |
* EABI note: sp_svc is always 64-bit aligned here, so PT_REGS_SIZE must be too
| 376 | */ |
| 377 | |
| 378 | #if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7) |
| 379 | #error "sizeof(struct pt_regs) must be a multiple of 8" |
| 380 | #endif |
| 381 | |
| 382 | .macro usr_entry, trace=1, uaccess=1 |
| 383 | UNWIND(.fnstart ) |
| 384 | UNWIND(.cantunwind ) @ don't unwind the user space |
| 385 | sub sp, sp, #PT_REGS_SIZE |
| 386 | ARM( stmib sp, {r1 - r12} ) |
| 387 | THUMB( stmia sp, {r0 - r12} ) |
| 388 | |
| 389 | ATRAP( mrc p15, 0, r7, c1, c0, 0) |
| 390 | ATRAP( ldr r8, .LCcralign) |
| 391 | |
| 392 | ldmia r0, {r3 - r5} |
| 393 | add r0, sp, #S_PC @ here for interlock avoidance |
| 394 | mov r6, #-1 @ "" "" "" "" |
| 395 | |
| 396 | str r3, [sp] @ save the "real" r0 copied |
| 397 | @ from the exception stack |
| 398 | |
| 399 | ATRAP( ldr r8, [r8, #0]) |
| 400 | |
| 401 | @ |
| 402 | @ We are now ready to fill in the remaining blanks on the stack: |
| 403 | @ |
| 404 | @ r4 - lr_<exception>, already fixed up for correct return/restart |
| 405 | @ r5 - spsr_<exception> |
| 406 | @ r6 - orig_r0 (see pt_regs definition in ptrace.h) |
| 407 | @ |
| 408 | @ Also, separately save sp_usr and lr_usr |
| 409 | @ |
| 410 | stmia r0, {r4 - r6} |
| 411 | ARM( stmdb r0, {sp, lr}^ ) |
| 412 | THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) |
| 413 | |
| 414 | .if \uaccess |
| 415 | uaccess_disable ip |
| 416 | .endif |
| 417 | |
| 418 | @ Enable the alignment trap while in kernel mode |
| 419 | ATRAP( teq r8, r7) |
| 420 | ATRAP( mcrne p15, 0, r8, c1, c0, 0) |
| 421 | |
| 422 | @ |
| 423 | @ Clear FP to mark the first stack frame |
| 424 | @ |
| 425 | zero_fp |
| 426 | |
| 427 | .if \trace |
| 428 | #ifdef CONFIG_TRACE_IRQFLAGS |
| 429 | bl trace_hardirqs_off |
| 430 | #endif |
| 431 | ct_user_exit save = 0 |
| 432 | .endif |
| 433 | .endm |
| 434 | |
| 435 | .macro kuser_cmpxchg_check |
| 436 | #if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) |
| 437 | #ifndef CONFIG_MMU |
| 438 | #warning "NPTL on non MMU needs fixing" |
| 439 | #else |
| 440 | @ Make sure our user space atomic helper is restarted |
| 441 | @ if it was interrupted in a critical region. Here we |
| 442 | @ perform a quick test inline since it should be false |
| 443 | @ 99.9999% of the time. The rest is done out of line. |
| 444 | cmp r4, #TASK_SIZE |
| 445 | blhs kuser_cmpxchg64_fixup |
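@ (The helpers live in the vector page above TASK_SIZE, so only
@ an aborted PC at or above TASK_SIZE can be inside a helper.)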
| 446 | #endif |
| 447 | #endif |
| 448 | .endm |
| 449 | |
| 450 | .align 5 |
| 451 | __dabt_usr: |
| 452 | usr_entry uaccess=0 |
| 453 | kuser_cmpxchg_check |
| 454 | mov r2, sp |
| 455 | dabt_helper |
| 456 | b ret_from_exception |
| 457 | UNWIND(.fnend ) |
| 458 | ENDPROC(__dabt_usr) |
| 459 | |
| 460 | .align 5 |
| 461 | __irq_usr: |
| 462 | usr_entry |
| 463 | kuser_cmpxchg_check |
| 464 | irq_handler |
| 465 | get_thread_info tsk |
| 466 | mov why, #0 |
| 467 | b ret_to_user_from_irq |
| 468 | UNWIND(.fnend ) |
| 469 | ENDPROC(__irq_usr) |
| 470 | |
| 471 | .ltorg |
| 472 | |
| 473 | .align 5 |
| 474 | __und_usr: |
| 475 | usr_entry uaccess=0 |
| 476 | |
| 477 | mov r2, r4 |
| 478 | mov r3, r5 |
| 479 | |
| 480 | @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the |
| 481 | @ faulting instruction depending on Thumb mode. |
| 482 | @ r3 = regs->ARM_cpsr |
| 483 | @ |
| 484 | @ The emulation code returns using r9 if it has emulated the |
| 485 | @ instruction, or the more conventional lr if we are to treat |
| 486 | @ this as a real undefined instruction |
| 487 | @ |
| 488 | badr r9, ret_from_exception |
| 489 | |
| 490 | @ IRQs must be enabled before attempting to read the instruction from |
| 491 | @ user space since that could cause a page/translation fault if the |
| 492 | @ page table was modified by another CPU. |
| 493 | enable_irq |
| 494 | |
| 495 | tst r3, #PSR_T_BIT @ Thumb mode? |
| 496 | bne __und_usr_thumb |
| 497 | sub r4, r2, #4 @ ARM instr at LR - 4 |
| 498 | 1: ldrt r0, [r4] |
| 499 | ARM_BE8(rev r0, r0) @ little endian instruction |
| 500 | |
| 501 | uaccess_disable ip |
| 502 | |
| 503 | @ r0 = 32-bit ARM instruction which caused the exception |
| 504 | @ r2 = PC value for the following instruction (:= regs->ARM_pc) |
| 505 | @ r4 = PC value for the faulting instruction |
| 506 | @ lr = 32-bit undefined instruction function |
| 507 | badr lr, __und_usr_fault_32 |
| 508 | b call_fpe |
| 509 | |
| 510 | __und_usr_thumb: |
| 511 | @ Thumb instruction |
| 512 | sub r4, r2, #2 @ First half of thumb instr at LR - 2 |
| 513 | #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7 |
| 514 | /* |
| 515 | * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms |
| 516 | * can never be supported in a single kernel, this code is not applicable at |
| 517 | * all when __LINUX_ARM_ARCH__ < 6. This allows simplifying assumptions to be |
| 518 | * made about .arch directives. |
| 519 | */ |
| 520 | #if __LINUX_ARM_ARCH__ < 7 |
| 521 | /* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */ |
| 522 | #define NEED_CPU_ARCHITECTURE |
| 523 | ldr r5, .LCcpu_architecture |
| 524 | ldr r5, [r5] |
| 525 | cmp r5, #CPU_ARCH_ARMv7 |
| 526 | blo __und_usr_fault_16 @ 16bit undefined instruction |
| 527 | /* |
| 528 | * The following code won't get run unless the running CPU really is v7, so |
| 529 | * coding round the lack of ldrht on older arches is pointless. Temporarily |
| 530 | * override the assembler target arch with the minimum required instead: |
| 531 | */ |
| 532 | .arch armv6t2 |
| 533 | #endif |
| 534 | 2: ldrht r5, [r4] |
| 535 | ARM_BE8(rev16 r5, r5) @ little endian instruction |
cmp r5, #0xe800 @ 32-bit instruction if first half >= 0xe800
| 537 | blo __und_usr_fault_16_pan @ 16bit undefined instruction |
| 538 | 3: ldrht r0, [r2] |
| 539 | ARM_BE8(rev16 r0, r0) @ little endian instruction |
| 540 | uaccess_disable ip |
| 541 | add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 |
| 542 | str r2, [sp, #S_PC] @ it's a 2x16bit instr, update |
| 543 | orr r0, r0, r5, lsl #16 |
| 544 | badr lr, __und_usr_fault_32 |
| 545 | @ r0 = the two 16-bit Thumb instructions which caused the exception |
| 546 | @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc) |
| 547 | @ r4 = PC value for the first 16-bit Thumb instruction |
| 548 | @ lr = 32bit undefined instruction function |
| 549 | |
| 550 | #if __LINUX_ARM_ARCH__ < 7 |
| 551 | /* If the target arch was overridden, change it back: */ |
| 552 | #ifdef CONFIG_CPU_32v6K |
| 553 | .arch armv6k |
| 554 | #else |
| 555 | .arch armv6 |
| 556 | #endif |
| 557 | #endif /* __LINUX_ARM_ARCH__ < 7 */ |
| 558 | #else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */ |
| 559 | b __und_usr_fault_16 |
| 560 | #endif |
| 561 | UNWIND(.fnend) |
| 562 | ENDPROC(__und_usr) |
| 563 | |
| 564 | /* |
| 565 | * The out of line fixup for the ldrt instructions above. |
| 566 | */ |
| 567 | .pushsection .text.fixup, "ax" |
| 568 | .align 2 |
| 569 | 4: str r4, [sp, #S_PC] @ retry current instruction |
| 570 | ret r9 |
| 571 | .popsection |
| 572 | .pushsection __ex_table,"a" |
| 573 | .long 1b, 4b |
| 574 | #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7 |
| 575 | .long 2b, 4b |
| 576 | .long 3b, 4b |
| 577 | #endif |
| 578 | .popsection |
| 579 | |
| 580 | /* |
| 581 | * Check whether the instruction is a co-processor instruction. |
| 582 | * If yes, we need to call the relevant co-processor handler. |
| 583 | * |
| 584 | * Note that we don't do a full check here for the co-processor |
| 585 | * instructions; all instructions with bit 27 set are well |
| 586 | * defined. The only instructions that should fault are the |
| 587 | * co-processor instructions. However, we have to watch out |
| 588 | * for the ARM6/ARM7 SWI bug. |
| 589 | * |
| 590 | * NEON is a special case that has to be handled here. Not all |
| 591 | * NEON instructions are co-processor instructions, so we have |
| 592 | * to make a special case of checking for them. Plus, there's |
| 593 | * five groups of them, so we have a table of mask/opcode pairs |
| 594 | * to check against, and if any match then we branch off into the |
| 595 | * NEON handler code. |
| 596 | * |
| 597 | * Emulators may wish to make use of the following registers: |
| 598 | * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb) |
| 599 | * r2 = PC value to resume execution after successful emulation |
| 600 | * r9 = normal "successful" return address |
* r10 = this thread's thread_info structure
| 602 | * lr = unrecognised instruction return address |
| 603 | * IRQs enabled, FIQs enabled. |
| 604 | */ |
| 605 | @ |
| 606 | @ Fall-through from Thumb-2 __und_usr |
| 607 | @ |
| 608 | #ifdef CONFIG_NEON |
| 609 | get_thread_info r10 @ get current thread |
| 610 | adr r6, .LCneon_thumb_opcodes |
| 611 | b 2f |
| 612 | #endif |
| 613 | call_fpe: |
| 614 | get_thread_info r10 @ get current thread |
| 615 | #ifdef CONFIG_NEON |
| 616 | adr r6, .LCneon_arm_opcodes |
| 617 | 2: ldr r5, [r6], #4 @ mask value |
| 618 | ldr r7, [r6], #4 @ opcode bits matching in mask |
| 619 | cmp r5, #0 @ end mask? |
| 620 | beq 1f |
| 621 | and r8, r0, r5 |
| 622 | cmp r8, r7 @ NEON instruction? |
| 623 | bne 2b |
| 624 | mov r7, #1 |
| 625 | strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used |
| 626 | strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used |
| 627 | b do_vfp @ let VFP handler handle this |
| 628 | 1: |
| 629 | #endif |
| 630 | tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27 |
| 631 | tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2 |
| 632 | reteq lr |
| 633 | and r8, r0, #0x00000f00 @ mask out CP number |
| 634 | THUMB( lsr r8, r8, #8 ) |
| 635 | mov r7, #1 |
| 636 | add r6, r10, #TI_USED_CP |
| 637 | ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[] |
| 638 | THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[] |
| 639 | #ifdef CONFIG_IWMMXT |
| 640 | @ Test if we need to give access to iWMMXt coprocessors |
| 641 | ldr r5, [r10, #TI_FLAGS] |
| 642 | rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only |
| 643 | movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1) |
| 644 | bcs iwmmxt_task_enable |
| 645 | #endif |
| 646 | ARM( add pc, pc, r8, lsr #6 ) |
| 647 | THUMB( lsl r8, r8, #2 ) |
| 648 | THUMB( add pc, r8 ) |
| 649 | nop |
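@ Computed branch: reading pc in ARM mode yields the address of the
@ current instruction + 8, i.e. the first table entry below; r8 holds
@ the CP number << 8, so "lsr #6" scales it to CP# * 4 bytes of table.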
| 650 | |
| 651 | ret.w lr @ CP#0 |
| 652 | W(b) do_fpe @ CP#1 (FPE) |
| 653 | W(b) do_fpe @ CP#2 (FPE) |
| 654 | ret.w lr @ CP#3 |
| 655 | #ifdef CONFIG_CRUNCH |
| 656 | b crunch_task_enable @ CP#4 (MaverickCrunch) |
| 657 | b crunch_task_enable @ CP#5 (MaverickCrunch) |
| 658 | b crunch_task_enable @ CP#6 (MaverickCrunch) |
| 659 | #else |
| 660 | ret.w lr @ CP#4 |
| 661 | ret.w lr @ CP#5 |
| 662 | ret.w lr @ CP#6 |
| 663 | #endif |
| 664 | ret.w lr @ CP#7 |
| 665 | ret.w lr @ CP#8 |
| 666 | ret.w lr @ CP#9 |
| 667 | #ifdef CONFIG_VFP |
| 668 | W(b) do_vfp @ CP#10 (VFP) |
| 669 | W(b) do_vfp @ CP#11 (VFP) |
| 670 | #else |
| 671 | ret.w lr @ CP#10 (VFP) |
| 672 | ret.w lr @ CP#11 (VFP) |
| 673 | #endif |
| 674 | ret.w lr @ CP#12 |
| 675 | ret.w lr @ CP#13 |
| 676 | ret.w lr @ CP#14 (Debug) |
| 677 | ret.w lr @ CP#15 (Control) |
| 678 | |
| 679 | #ifdef NEED_CPU_ARCHITECTURE |
| 680 | .align 2 |
| 681 | .LCcpu_architecture: |
| 682 | .word __cpu_architecture |
| 683 | #endif |
| 684 | |
| 685 | #ifdef CONFIG_NEON |
| 686 | .align 6 |
| 687 | |
| 688 | .LCneon_arm_opcodes: |
| 689 | .word 0xfe000000 @ mask |
| 690 | .word 0xf2000000 @ opcode |
| 691 | |
| 692 | .word 0xff100000 @ mask |
| 693 | .word 0xf4000000 @ opcode |
| 694 | |
| 695 | .word 0x00000000 @ mask |
| 696 | .word 0x00000000 @ opcode |
| 697 | |
| 698 | .LCneon_thumb_opcodes: |
| 699 | .word 0xef000000 @ mask |
| 700 | .word 0xef000000 @ opcode |
| 701 | |
| 702 | .word 0xff100000 @ mask |
| 703 | .word 0xf9000000 @ opcode |
| 704 | |
| 705 | .word 0x00000000 @ mask |
| 706 | .word 0x00000000 @ opcode |
| 707 | #endif |
| 708 | |
| 709 | do_fpe: |
| 710 | ldr r4, .LCfp |
| 711 | add r10, r10, #TI_FPSTATE @ r10 = workspace |
| 712 | ldr pc, [r4] @ Call FP module USR entry point |
| 713 | |
| 714 | /* |
| 715 | * The FP module is called with these registers set: |
| 716 | * r0 = instruction |
| 717 | * r2 = PC+4 |
| 718 | * r9 = normal "successful" return address |
| 719 | * r10 = FP workspace |
| 720 | * lr = unrecognised FP instruction return address |
| 721 | */ |
| 722 | |
| 723 | .pushsection .data |
| 724 | ENTRY(fp_enter) |
| 725 | .word no_fp |
| 726 | .popsection |
| 727 | |
| 728 | ENTRY(no_fp) |
| 729 | ret lr |
| 730 | ENDPROC(no_fp) |
| 731 | |
| 732 | __und_usr_fault_32: |
| 733 | mov r1, #4 |
| 734 | b 1f |
| 735 | __und_usr_fault_16_pan: |
| 736 | uaccess_disable ip |
| 737 | __und_usr_fault_16: |
| 738 | mov r1, #2 |
| 739 | 1: mov r0, sp |
| 740 | badr lr, ret_from_exception |
| 741 | b __und_fault |
| 742 | ENDPROC(__und_usr_fault_32) |
| 743 | ENDPROC(__und_usr_fault_16) |
| 744 | |
| 745 | .align 5 |
| 746 | __pabt_usr: |
| 747 | usr_entry |
| 748 | mov r2, sp @ regs |
| 749 | pabt_helper |
| 750 | UNWIND(.fnend ) |
| 751 | /* fall through */ |
| 752 | /* |
| 753 | * This is the return code to user mode for abort handlers |
| 754 | */ |
| 755 | ENTRY(ret_from_exception) |
| 756 | UNWIND(.fnstart ) |
| 757 | UNWIND(.cantunwind ) |
| 758 | get_thread_info tsk |
| 759 | mov why, #0 |
| 760 | b ret_to_user |
| 761 | UNWIND(.fnend ) |
| 762 | ENDPROC(__pabt_usr) |
| 763 | ENDPROC(ret_from_exception) |
| 764 | |
| 765 | .align 5 |
| 766 | __fiq_usr: |
| 767 | usr_entry trace=0 |
| 768 | kuser_cmpxchg_check |
| 769 | mov r0, sp @ struct pt_regs *regs |
| 770 | bl handle_fiq_as_nmi |
| 771 | get_thread_info tsk |
| 772 | restore_user_regs fast = 0, offset = 0 |
| 773 | UNWIND(.fnend ) |
| 774 | ENDPROC(__fiq_usr) |
| 775 | |
| 776 | /* |
| 777 | * Register switch for ARMv3 and ARMv4 processors |
| 778 | * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info |
| 779 | * previous and next are guaranteed not to be the same. |
| 780 | */ |
| 781 | ENTRY(__switch_to) |
| 782 | UNWIND(.fnstart ) |
| 783 | UNWIND(.cantunwind ) |
| 784 | add ip, r1, #TI_CPU_SAVE |
| 785 | ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack |
| 786 | THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack |
| 787 | THUMB( str sp, [ip], #4 ) |
| 788 | THUMB( str lr, [ip], #4 ) |
| 789 | ldr r4, [r2, #TI_TP_VALUE] |
| 790 | ldr r5, [r2, #TI_TP_VALUE + 4] |
| 791 | #ifdef CONFIG_CPU_USE_DOMAINS |
| 792 | mrc p15, 0, r6, c3, c0, 0 @ Get domain register |
| 793 | str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register |
| 794 | ldr r6, [r2, #TI_CPU_DOMAIN] |
| 795 | #endif |
| 796 | switch_tls r1, r4, r5, r3, r7 |
| 797 | #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) |
| 798 | ldr r7, [r2, #TI_TASK] |
| 799 | ldr r8, =__stack_chk_guard |
| 800 | ldr r7, [r7, #TSK_STACK_CANARY] |
| 801 | #endif |
| 802 | #ifdef CONFIG_CPU_USE_DOMAINS |
| 803 | mcr p15, 0, r6, c3, c0, 0 @ Set domain register |
| 804 | #endif |
| 805 | mov r5, r0 |
| 806 | add r4, r2, #TI_CPU_SAVE |
| 807 | ldr r0, =thread_notify_head |
| 808 | mov r1, #THREAD_NOTIFY_SWITCH |
| 809 | bl atomic_notifier_call_chain |
| 810 | #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) |
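@ On !SMP the stack protector canary lives in the single global
@ __stack_chk_guard, so the new task's canary (fetched into r7
@ above) is written out here; r7 and r8 are callee-saved and so
@ survive the notifier call.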
| 811 | str r7, [r8] |
| 812 | #endif |
| 813 | THUMB( mov ip, r4 ) |
| 814 | mov r0, r5 |
| 815 | ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously |
| 816 | THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously |
| 817 | THUMB( ldr sp, [ip], #4 ) |
| 818 | THUMB( ldr pc, [ip] ) |
| 819 | UNWIND(.fnend ) |
| 820 | ENDPROC(__switch_to) |
| 821 | |
| 822 | __INIT |
| 823 | |
| 824 | /* |
| 825 | * User helpers. |
| 826 | * |
| 827 | * Each segment is 32-byte aligned and will be moved to the top of the high |
| 828 | * vector page. New segments (if ever needed) must be added in front of |
| 829 | * existing ones. This mechanism should be used only for things that are |
| 830 | * really small and justified, and not be abused freely. |
| 831 | * |
| 832 | * See Documentation/arm/kernel_user_helpers.txt for formal definitions. |
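*
* For reference, user space reaches a helper through a hard-coded
* address; a minimal C sketch (the documentation above remains the
* authoritative ABI reference):
*
*	typedef void * (*kuser_get_tls_t)(void);
*	#define __kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)
*	void *tls = __kuser_get_tls();	/* TLS value returned in r0 */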
| 833 | */ |
| 834 | THUMB( .arm ) |
| 835 | |
| 836 | .macro usr_ret, reg |
| 837 | #ifdef CONFIG_ARM_THUMB |
| 838 | bx \reg |
| 839 | #else |
| 840 | ret \reg |
| 841 | #endif |
| 842 | .endm |
| 843 | |
| 844 | .macro kuser_pad, sym, size |
| 845 | .if (. - \sym) & 3 |
| 846 | .rept 4 - (. - \sym) & 3 |
| 847 | .byte 0 |
| 848 | .endr |
| 849 | .endif |
| 850 | .rept (\size - (. - \sym)) / 4 |
| 851 | .word 0xe7fddef1 |
| 852 | .endr |
| 853 | .endm |
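@ The 0xe7fddef1 fill word lies in the architecturally undefined
@ instruction space, so a stray jump into helper padding traps
@ instead of executing whatever bytes happen to be there.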
| 854 | |
| 855 | #ifdef CONFIG_KUSER_HELPERS |
| 856 | .align 5 |
| 857 | .globl __kuser_helper_start |
| 858 | __kuser_helper_start: |
| 859 | |
| 860 | /* |
| 861 | * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular |
| 862 | * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point. |
| 863 | */ |
| 864 | |
| 865 | __kuser_cmpxchg64: @ 0xffff0f60 |
| 866 | |
| 867 | #if defined(CONFIG_CPU_32v6K) |
| 868 | |
| 869 | stmfd sp!, {r4, r5, r6, r7} |
| 870 | ldrd r4, r5, [r0] @ load old val |
| 871 | ldrd r6, r7, [r1] @ load new val |
| 872 | smp_dmb arm |
| 873 | 1: ldrexd r0, r1, [r2] @ load current val |
| 874 | eors r3, r0, r4 @ compare with oldval (1) |
| 875 | eoreqs r3, r1, r5 @ compare with oldval (2) |
| 876 | strexdeq r3, r6, r7, [r2] @ store newval if eq |
| 877 | teqeq r3, #1 @ success? |
| 878 | beq 1b @ if no then retry |
| 879 | smp_dmb arm |
| 880 | rsbs r0, r3, #0 @ set returned val and C flag |
| 881 | ldmfd sp!, {r4, r5, r6, r7} |
| 882 | usr_ret lr |
| 883 | |
| 884 | #elif !defined(CONFIG_SMP) |
| 885 | |
| 886 | #ifdef CONFIG_MMU |
| 887 | |
| 888 | /* |
| 889 | * The only thing that can break atomicity in this cmpxchg64 |
| 890 | * implementation is either an IRQ or a data abort exception |
| 891 | * causing another process/thread to be scheduled in the middle of |
| 892 | * the critical sequence. The same strategy as for cmpxchg is used. |
| 893 | */ |
| 894 | stmfd sp!, {r4, r5, r6, lr} |
| 895 | ldmia r0, {r4, r5} @ load old val |
| 896 | ldmia r1, {r6, lr} @ load new val |
| 897 | 1: ldmia r2, {r0, r1} @ load current val |
| 898 | eors r3, r0, r4 @ compare with oldval (1) |
| 899 | eoreqs r3, r1, r5 @ compare with oldval (2) |
| 900 | 2: stmeqia r2, {r6, lr} @ store newval if eq |
| 901 | rsbs r0, r3, #0 @ set return val and C flag |
| 902 | ldmfd sp!, {r4, r5, r6, pc} |
| 903 | |
| 904 | .text |
| 905 | kuser_cmpxchg64_fixup: |
@ Called from kuser_cmpxchg_check macro.
| 907 | @ r4 = address of interrupted insn (must be preserved). |
| 908 | @ sp = saved regs. r7 and r8 are clobbered. |
| 909 | @ 1b = first critical insn, 2b = last critical insn. |
| 910 | @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b. |
| 911 | mov r7, #0xffff0fff |
| 912 | sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64))) |
| 913 | subs r8, r4, r7 |
| 914 | rsbcss r8, r8, #(2b - 1b) |
| 915 | strcs r7, [sp, #S_PC] |
| 916 | #if __LINUX_ARM_ARCH__ < 6 |
| 917 | bcc kuser_cmpxchg32_fixup |
| 918 | #endif |
| 919 | ret lr |
| 920 | .previous |
| 921 | |
| 922 | #else |
| 923 | #warning "NPTL on non MMU needs fixing" |
| 924 | mov r0, #-1 |
| 925 | adds r0, r0, #0 |
| 926 | usr_ret lr |
| 927 | #endif |
| 928 | |
| 929 | #else |
| 930 | #error "incoherent kernel configuration" |
| 931 | #endif |
| 932 | |
| 933 | kuser_pad __kuser_cmpxchg64, 64 |
| 934 | |
| 935 | __kuser_memory_barrier: @ 0xffff0fa0 |
| 936 | smp_dmb arm |
| 937 | usr_ret lr |
| 938 | |
| 939 | kuser_pad __kuser_memory_barrier, 32 |
| 940 | |
| 941 | __kuser_cmpxchg: @ 0xffff0fc0 |
| 942 | |
| 943 | #if __LINUX_ARM_ARCH__ < 6 |
| 944 | |
| 945 | #ifdef CONFIG_MMU |
| 946 | |
| 947 | /* |
| 948 | * The only thing that can break atomicity in this cmpxchg |
| 949 | * implementation is either an IRQ or a data abort exception |
| 950 | * causing another process/thread to be scheduled in the middle |
| 951 | * of the critical sequence. To prevent this, code is added to |
| 952 | * the IRQ and data abort exception handlers to set the pc back |
| 953 | * to the beginning of the critical section if it is found to be |
| 954 | * within that critical section (see kuser_cmpxchg_fixup). |
| 955 | */ |
| 956 | 1: ldr r3, [r2] @ load current val |
| 957 | subs r3, r3, r0 @ compare with oldval |
| 958 | 2: streq r1, [r2] @ store newval if eq |
| 959 | rsbs r0, r3, #0 @ set return val and C flag |
| 960 | usr_ret lr |
| 961 | |
| 962 | .text |
| 963 | kuser_cmpxchg32_fixup: |
| 964 | @ Called from kuser_cmpxchg_check macro. |
| 965 | @ r4 = address of interrupted insn (must be preserved). |
| 966 | @ sp = saved regs. r7 and r8 are clobbered. |
| 967 | @ 1b = first critical insn, 2b = last critical insn. |
| 968 | @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b. |
| 969 | mov r7, #0xffff0fff |
| 970 | sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg))) |
| 971 | subs r8, r4, r7 |
| 972 | rsbcss r8, r8, #(2b - 1b) |
| 973 | strcs r7, [sp, #S_PC] |
| 974 | ret lr |
| 975 | .previous |
| 976 | |
| 977 | #else |
| 978 | #warning "NPTL on non MMU needs fixing" |
| 979 | mov r0, #-1 |
| 980 | adds r0, r0, #0 |
| 981 | usr_ret lr |
| 982 | #endif |
| 983 | |
| 984 | #else |
| 985 | |
| 986 | smp_dmb arm |
| 987 | 1: ldrex r3, [r2] |
| 988 | subs r3, r3, r0 |
| 989 | strexeq r3, r1, [r2] |
| 990 | teqeq r3, #1 |
| 991 | beq 1b |
| 992 | rsbs r0, r3, #0 |
| 993 | /* beware -- each __kuser slot must be 8 instructions max */ |
| 994 | ALT_SMP(b __kuser_memory_barrier) |
| 995 | ALT_UP(usr_ret lr) |
| 996 | |
| 997 | #endif |
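/*
 * User-space calling sketch for __kuser_cmpxchg (ABI as documented in
 * kernel_user_helpers.txt: r0 = oldval, r1 = newval, r2 = ptr;
 * returns 0 with the C flag set if *ptr was updated):
 *
 *	typedef int (*kuser_cmpxchg_t)(int oldval, int newval,
 *				       volatile int *ptr);
 *	#define __kuser_cmpxchg ((kuser_cmpxchg_t)0xffff0fc0)
 */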
| 998 | |
| 999 | kuser_pad __kuser_cmpxchg, 32 |
| 1000 | |
| 1001 | __kuser_get_tls: @ 0xffff0fe0 |
| 1002 | ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init |
| 1003 | usr_ret lr |
| 1004 | mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code |
| 1005 | kuser_pad __kuser_get_tls, 16 |
| 1006 | .rep 3 |
| 1007 | .word 0 @ 0xffff0ff0 software TLS value, then |
| 1008 | .endr @ pad up to __kuser_helper_version |
| 1009 | |
| 1010 | __kuser_helper_version: @ 0xffff0ffc |
| 1011 | .word ((__kuser_helper_end - __kuser_helper_start) >> 5) |
| 1012 | |
| 1013 | .globl __kuser_helper_end |
| 1014 | __kuser_helper_end: |
| 1015 | |
| 1016 | #endif |
| 1017 | |
| 1018 | THUMB( .thumb ) |
| 1019 | |
| 1020 | /* |
| 1021 | * Vector stubs. |
| 1022 | * |
| 1023 | * This code is copied to 0xffff1000 so we can use branches in the |
| 1024 | * vectors, rather than ldr's. Note that this code must not exceed |
| 1025 | * a page size. |
| 1026 | * |
| 1027 | * Common stub entry macro: |
| 1028 | * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC |
| 1029 | * |
| 1030 | * SP points to a minimal amount of processor-private memory, the address |
| 1031 | * of which is copied into r0 for the mode specific abort handler. |
| 1032 | */ |
| 1033 | .macro vector_stub, name, mode, correction=0 |
| 1034 | .align 5 |
| 1035 | |
| 1036 | vector_\name: |
| 1037 | .if \correction |
| 1038 | sub lr, lr, #\correction |
| 1039 | .endif |
| 1040 | |
| 1041 | @ |
| 1042 | @ Save r0, lr_<exception> (parent PC) and spsr_<exception> |
| 1043 | @ (parent CPSR) |
| 1044 | @ |
| 1045 | stmia sp, {r0, lr} @ save r0, lr |
| 1046 | mrs lr, spsr |
| 1047 | str lr, [sp, #8] @ save spsr |
| 1048 | |
| 1049 | @ |
| 1050 | @ Prepare for SVC32 mode. IRQs remain disabled. |
| 1051 | @ |
| 1052 | mrs r0, cpsr |
| 1053 | eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE) |
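@ (x ^ (\mode ^ SVC_MODE) rewrites the mode field from \mode to
@ SVC_MODE without disturbing the other bits; PSR_ISETSTATE likewise
@ selects the kernel's instruction set for the handler.)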
| 1054 | msr spsr_cxsf, r0 |
| 1055 | |
| 1056 | @ |
| 1057 | @ the branch table must immediately follow this code |
| 1058 | @ |
| 1059 | and lr, lr, #0x0f |
| 1060 | THUMB( adr r0, 1f ) |
| 1061 | THUMB( ldr lr, [r0, lr, lsl #2] ) |
| 1062 | mov r0, sp |
| 1063 | ARM( ldr lr, [pc, lr, lsl #2] ) |
| 1064 | movs pc, lr @ branch to handler in SVC mode |
| 1065 | ENDPROC(vector_\name) |
| 1066 | |
| 1067 | .align 2 |
| 1068 | @ handler addresses follow this label |
| 1069 | 1: |
| 1070 | .endm |
| 1071 | |
| 1072 | .section .stubs, "ax", %progbits |
| 1073 | @ This must be the first word |
| 1074 | .word vector_swi |
| 1075 | |
| 1076 | vector_rst: |
| 1077 | ARM( swi SYS_ERROR0 ) |
| 1078 | THUMB( svc #0 ) |
| 1079 | THUMB( nop ) |
| 1080 | b vector_und |
| 1081 | |
| 1082 | /* |
| 1083 | * Interrupt dispatcher |
| 1084 | */ |
| 1085 | vector_stub irq, IRQ_MODE, 4 |
| 1086 | |
| 1087 | .long __irq_usr @ 0 (USR_26 / USR_32) |
| 1088 | .long __irq_invalid @ 1 (FIQ_26 / FIQ_32) |
| 1089 | .long __irq_invalid @ 2 (IRQ_26 / IRQ_32) |
| 1090 | .long __irq_svc @ 3 (SVC_26 / SVC_32) |
| 1091 | .long __irq_invalid @ 4 |
| 1092 | .long __irq_invalid @ 5 |
| 1093 | .long __irq_invalid @ 6 |
| 1094 | .long __irq_invalid @ 7 |
| 1095 | .long __irq_invalid @ 8 |
| 1096 | .long __irq_invalid @ 9 |
| 1097 | .long __irq_invalid @ a |
| 1098 | .long __irq_invalid @ b |
| 1099 | .long __irq_invalid @ c |
| 1100 | .long __irq_invalid @ d |
| 1101 | .long __irq_invalid @ e |
| 1102 | .long __irq_invalid @ f |
| 1103 | |
| 1104 | /* |
| 1105 | * Data abort dispatcher |
| 1106 | * Enter in ABT mode, spsr = USR CPSR, lr = USR PC |
| 1107 | */ |
| 1108 | vector_stub dabt, ABT_MODE, 8 |
| 1109 | |
| 1110 | .long __dabt_usr @ 0 (USR_26 / USR_32) |
| 1111 | .long __dabt_invalid @ 1 (FIQ_26 / FIQ_32) |
| 1112 | .long __dabt_invalid @ 2 (IRQ_26 / IRQ_32) |
| 1113 | .long __dabt_svc @ 3 (SVC_26 / SVC_32) |
| 1114 | .long __dabt_invalid @ 4 |
| 1115 | .long __dabt_invalid @ 5 |
| 1116 | .long __dabt_invalid @ 6 |
| 1117 | .long __dabt_invalid @ 7 |
| 1118 | .long __dabt_invalid @ 8 |
| 1119 | .long __dabt_invalid @ 9 |
| 1120 | .long __dabt_invalid @ a |
| 1121 | .long __dabt_invalid @ b |
| 1122 | .long __dabt_invalid @ c |
| 1123 | .long __dabt_invalid @ d |
| 1124 | .long __dabt_invalid @ e |
| 1125 | .long __dabt_invalid @ f |
| 1126 | |
| 1127 | /* |
| 1128 | * Prefetch abort dispatcher |
| 1129 | * Enter in ABT mode, spsr = USR CPSR, lr = USR PC |
| 1130 | */ |
| 1131 | vector_stub pabt, ABT_MODE, 4 |
| 1132 | |
| 1133 | .long __pabt_usr @ 0 (USR_26 / USR_32) |
| 1134 | .long __pabt_invalid @ 1 (FIQ_26 / FIQ_32) |
| 1135 | .long __pabt_invalid @ 2 (IRQ_26 / IRQ_32) |
| 1136 | .long __pabt_svc @ 3 (SVC_26 / SVC_32) |
| 1137 | .long __pabt_invalid @ 4 |
| 1138 | .long __pabt_invalid @ 5 |
| 1139 | .long __pabt_invalid @ 6 |
| 1140 | .long __pabt_invalid @ 7 |
| 1141 | .long __pabt_invalid @ 8 |
| 1142 | .long __pabt_invalid @ 9 |
| 1143 | .long __pabt_invalid @ a |
| 1144 | .long __pabt_invalid @ b |
| 1145 | .long __pabt_invalid @ c |
| 1146 | .long __pabt_invalid @ d |
| 1147 | .long __pabt_invalid @ e |
| 1148 | .long __pabt_invalid @ f |
| 1149 | |
| 1150 | /* |
| 1151 | * Undef instr entry dispatcher |
| 1152 | * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC |
| 1153 | */ |
| 1154 | vector_stub und, UND_MODE |
| 1155 | |
| 1156 | .long __und_usr @ 0 (USR_26 / USR_32) |
| 1157 | .long __und_invalid @ 1 (FIQ_26 / FIQ_32) |
| 1158 | .long __und_invalid @ 2 (IRQ_26 / IRQ_32) |
| 1159 | .long __und_svc @ 3 (SVC_26 / SVC_32) |
| 1160 | .long __und_invalid @ 4 |
| 1161 | .long __und_invalid @ 5 |
| 1162 | .long __und_invalid @ 6 |
| 1163 | .long __und_invalid @ 7 |
| 1164 | .long __und_invalid @ 8 |
| 1165 | .long __und_invalid @ 9 |
| 1166 | .long __und_invalid @ a |
| 1167 | .long __und_invalid @ b |
| 1168 | .long __und_invalid @ c |
| 1169 | .long __und_invalid @ d |
| 1170 | .long __und_invalid @ e |
| 1171 | .long __und_invalid @ f |
| 1172 | |
| 1173 | .align 5 |
| 1174 | |
| 1175 | /*============================================================================= |
| 1176 | * Address exception handler |
| 1177 | *----------------------------------------------------------------------------- |
| 1178 | * These aren't too critical. |
| 1179 | * (they're not supposed to happen, and won't happen in 32-bit data mode). |
| 1180 | */ |
| 1181 | |
| 1182 | vector_addrexcptn: |
| 1183 | b vector_addrexcptn |
| 1184 | |
| 1185 | /*============================================================================= |
| 1186 | * FIQ "NMI" handler |
| 1187 | *----------------------------------------------------------------------------- |
* Handle a FIQ using the SVC stack, allowing FIQ to act like NMI on x86
| 1189 | * systems. |
| 1190 | */ |
| 1191 | vector_stub fiq, FIQ_MODE, 4 |
| 1192 | |
| 1193 | .long __fiq_usr @ 0 (USR_26 / USR_32) |
| 1194 | .long __fiq_svc @ 1 (FIQ_26 / FIQ_32) |
| 1195 | .long __fiq_svc @ 2 (IRQ_26 / IRQ_32) |
| 1196 | .long __fiq_svc @ 3 (SVC_26 / SVC_32) |
| 1197 | .long __fiq_svc @ 4 |
| 1198 | .long __fiq_svc @ 5 |
| 1199 | .long __fiq_svc @ 6 |
| 1200 | .long __fiq_abt @ 7 |
| 1201 | .long __fiq_svc @ 8 |
| 1202 | .long __fiq_svc @ 9 |
| 1203 | .long __fiq_svc @ a |
| 1204 | .long __fiq_svc @ b |
| 1205 | .long __fiq_svc @ c |
| 1206 | .long __fiq_svc @ d |
| 1207 | .long __fiq_svc @ e |
| 1208 | .long __fiq_svc @ f |
| 1209 | |
| 1210 | .globl vector_fiq |
| 1211 | |
| 1212 | .section .vectors, "ax", %progbits |
| 1213 | .L__vectors_start: |
| 1214 | W(b) vector_rst |
| 1215 | W(b) vector_und |
| 1216 | W(ldr) pc, .L__vectors_start + 0x1000 |
| 1217 | W(b) vector_pabt |
| 1218 | W(b) vector_dabt |
| 1219 | W(b) vector_addrexcptn |
| 1220 | W(b) vector_irq |
| 1221 | W(b) vector_fiq |
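@ The vectors page is mapped at 0xffff0000 with the stubs page directly
@ after it at 0xffff1000, so the SWI slot above loads vector_swi's
@ address from the first word of the stubs section while the other
@ entries remain within branch range of their stubs.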
| 1222 | |
| 1223 | .data |
| 1224 | |
| 1225 | .globl cr_alignment |
| 1226 | cr_alignment: |
| 1227 | .space 4 |
| 1228 | |
| 1229 | #ifdef CONFIG_MULTI_IRQ_HANDLER |
| 1230 | .globl handle_arch_irq |
| 1231 | handle_arch_irq: |
| 1232 | .space 4 |
| 1233 | #endif |