xtensa: check thread flags atomically on return from user exception
[deliverable/linux.git] / arch / xtensa / kernel / entry.S
CommitLineData
5a0015d6
CZ
1/*
2 * arch/xtensa/kernel/entry.S
3 *
4 * Low-level exception handling
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
2d1c645c 10 * Copyright (C) 2004 - 2008 by Tensilica Inc.
5a0015d6
CZ
11 *
12 * Chris Zankel <chris@zankel.net>
13 *
14 */
15
16#include <linux/linkage.h>
0013a854 17#include <asm/asm-offsets.h>
5a0015d6 18#include <asm/processor.h>
4573e398 19#include <asm/coprocessor.h>
5a0015d6
CZ
20#include <asm/thread_info.h>
21#include <asm/uaccess.h>
22#include <asm/unistd.h>
23#include <asm/ptrace.h>
24#include <asm/current.h>
25#include <asm/pgtable.h>
26#include <asm/page.h>
27#include <asm/signal.h>
173d6681 28#include <asm/tlbflush.h>
367b8112 29#include <variant/tie-asm.h>
5a0015d6
CZ
30
31/* Unimplemented features. */
32
5a0015d6
CZ
33#undef KERNEL_STACK_OVERFLOW_CHECK
34#undef PREEMPTIBLE_KERNEL
35#undef ALLOCA_EXCEPTION_IN_IRAM
36
37/* Not well tested.
38 *
39 * - fast_coprocessor
40 */
41
42/*
43 * Macro to find first bit set in WINDOWBASE from the left + 1
44 *
45 * 100....0 -> 1
46 * 010....0 -> 2
47 * 000....1 -> WSBITS
48 */
49
50 .macro ffs_ws bit mask
51
52#if XCHAL_HAVE_NSA
53 nsau \bit, \mask # 32-WSBITS ... 31 (32 iff 0)
54 addi \bit, \bit, WSBITS - 32 + 1 # uppermost bit set -> return 1
55#else
56 movi \bit, WSBITS
57#if WSBITS > 16
58 _bltui \mask, 0x10000, 99f # binary search when NSA is absent:
59 addi \bit, \bit, -16 # halve the candidate range each step
60 extui \mask, \mask, 16, 16
61#endif
62#if WSBITS > 8
6399: _bltui \mask, 0x100, 99f
64 addi \bit, \bit, -8
65 srli \mask, \mask, 8
66#endif
6799: _bltui \mask, 0x10, 99f
68 addi \bit, \bit, -4
69 srli \mask, \mask, 4
7099: _bltui \mask, 0x4, 99f
71 addi \bit, \bit, -2
72 srli \mask, \mask, 2
7399: _bltui \mask, 0x2, 99f
74 addi \bit, \bit, -1
7599:
76
77#endif
78 .endm
79
80/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
81
82/*
83 * First-level exception handler for user exceptions.
84 * Save some special registers, extra states and all registers in the AR
85 * register file that were in use in the user task, and jump to the common
86 * exception code.
87 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
88 * save them for kernel exceptions).
89 *
90 * Entry condition for user_exception:
91 *
92 * a0: trashed, original value saved on stack (PT_AREG0)
93 * a1: a1
94 * a2: new stack pointer, original value in depc
95 * a3: dispatch table
96 * depc: a2, original value saved on stack (PT_DEPC)
97 * excsave1: a3
98 *
99 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
100 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
101 *
102 * Entry condition for _user_exception:
103 *
104 * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
105 * excsave has been restored, and
106 * stack pointer (a1) has been set.
107 *
3ad2f3fb 108 * Note: _user_exception might be at an odd address. Don't use call0..call12
5a0015d6
CZ
109 */
110
111ENTRY(user_exception)
112
113 /* Save a2, a3, and depc, restore excsave_1 and set SP. */
114
bc5378fc
MF
115 xsr a3, excsave1
116 rsr a0, depc
5a0015d6
CZ
117 s32i a1, a2, PT_AREG1
118 s32i a0, a2, PT_AREG2
119 s32i a3, a2, PT_AREG3
120 mov a1, a2 # switch SP to the exception frame
121
122 .globl _user_exception
123_user_exception:
124
125 /* Save SAR and turn off single stepping */
126
127 movi a2, 0
bc5378fc
MF
128 rsr a3, sar
129 xsr a2, icountlevel
5a0015d6 130 s32i a3, a1, PT_SAR
29c4dfd9 131 s32i a2, a1, PT_ICOUNTLEVEL
5a0015d6 132
c50842df
CZ
133#if XCHAL_HAVE_THREADPTR
134 rur a2, threadptr
135 s32i a2, a1, PT_THREADPTR
136#endif
137
5a0015d6
CZ
138 /* Rotate ws so that the current windowbase is at bit0. */
139 /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
140
bc5378fc
MF
141 rsr a2, windowbase
142 rsr a3, windowstart
5a0015d6
CZ
143 ssr a2 # SAR = WINDOWBASE (rotation amount)
144 s32i a2, a1, PT_WINDOWBASE
145 s32i a3, a1, PT_WINDOWSTART
146 slli a2, a3, 32-WSBITS
147 src a2, a3, a2 # funnel shift: rotate WS right by SAR
148 srli a2, a2, 32-WSBITS
149 s32i a2, a1, PT_WMASK # needed for restoring registers
150
151 /* Save only live registers. */
152
153 _bbsi.l a2, 1, 1f
154 s32i a4, a1, PT_AREG4
155 s32i a5, a1, PT_AREG5
156 s32i a6, a1, PT_AREG6
157 s32i a7, a1, PT_AREG7
158 _bbsi.l a2, 2, 1f
159 s32i a8, a1, PT_AREG8
160 s32i a9, a1, PT_AREG9
161 s32i a10, a1, PT_AREG10
162 s32i a11, a1, PT_AREG11
163 _bbsi.l a2, 3, 1f
164 s32i a12, a1, PT_AREG12
165 s32i a13, a1, PT_AREG13
166 s32i a14, a1, PT_AREG14
167 s32i a15, a1, PT_AREG15
168 _bnei a2, 1, 1f # only one valid frame?
169
170 /* Only one valid frame, skip saving regs. */
171
172 j 2f
173
174 /* Save the remaining registers.
175 * We have to save all registers up to the first '1' from
176 * the right, except the current frame (bit 0).
177 * Assume a2 is: 001001000110001
178 * All register frames starting from the top field to the marked '1'
5a0015d6
CZ
179 * must be saved.
180 */
181
1821: addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0
183 neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1
184 and a3, a3, a2 # max. only one bit is set
185
186 /* Find number of frames to save */
187
188 ffs_ws a0, a3 # number of frames to the '1' from left
189
190 /* Store information into WMASK:
191 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
192 * bits 4...: number of valid 4-register frames
193 */
194
195 slli a3, a0, 4 # number of frames to save in bits 8..4
196 extui a2, a2, 0, 4 # mask for the first 16 registers
197 or a2, a3, a2
198 s32i a2, a1, PT_WMASK # needed when we restore the reg-file
199
200 /* Save 4 registers at a time */
201
2021: rotw -1 # rotate window to reach the next frame
203 s32i a0, a5, PT_AREG_END - 16
204 s32i a1, a5, PT_AREG_END - 12
205 s32i a2, a5, PT_AREG_END - 8
206 s32i a3, a5, PT_AREG_END - 4
207 addi a0, a4, -1 # a4/a5 are the previous frame's a0/a1
208 addi a1, a5, -16
209 _bnez a0, 1b
210
211 /* WINDOWBASE still in SAR! */
212
bc5378fc 213 rsr a2, sar # original WINDOWBASE
5a0015d6
CZ
214 movi a3, 1
215 ssl a2
216 sll a3, a3
bc5378fc
MF
217 wsr a3, windowstart # set corresponding WINDOWSTART bit
218 wsr a2, windowbase # and WINDOWSTART
5a0015d6
CZ
219 rsync
220
221 /* We are back to the original stack pointer (a1) */
222
c658eac6 2232: /* Now, jump to the common exception handler. */
5a0015d6
CZ
224
225 j common_exception
226
d1538c46 227ENDPROC(user_exception)
5a0015d6
CZ
228
229/*
230 * First-level exit handler for kernel exceptions
231 * Save special registers and the live window frame.
232 * Note: Even though we change the stack pointer, we don't have to do a
233 * MOVSP here, as we do that when we return from the exception.
234 * (See comment in the kernel exception exit code)
235 *
236 * Entry condition for kernel_exception:
237 *
238 * a0: trashed, original value saved on stack (PT_AREG0)
239 * a1: a1
240 * a2: new stack pointer, original in DEPC
241 * a3: dispatch table
242 * depc: a2, original value saved on stack (PT_DEPC)
243 * excsave_1: a3
244 *
245 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
246 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
247 *
248 * Entry condition for _kernel_exception:
249 *
250 * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
251 * excsave has been restored, and
252 * stack pointer (a1) has been set.
253 *
3ad2f3fb 254 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
5a0015d6
CZ
255 */
256
257ENTRY(kernel_exception)
258
259 /* Save a0, a2, a3, DEPC and set SP. */
260
bc5378fc
MF
261 xsr a3, excsave1 # restore a3, excsave_1
262 rsr a0, depc # get a2
5a0015d6
CZ
263 s32i a1, a2, PT_AREG1
264 s32i a0, a2, PT_AREG2
265 s32i a3, a2, PT_AREG3
266 mov a1, a2 # switch SP to the exception frame
267
268 .globl _kernel_exception
269_kernel_exception:
270
271 /* Save SAR and turn off single stepping */
272
273 movi a2, 0
bc5378fc
MF
274 rsr a3, sar
275 xsr a2, icountlevel
5a0015d6 276 s32i a3, a1, PT_SAR
29c4dfd9 277 s32i a2, a1, PT_ICOUNTLEVEL
5a0015d6
CZ
278
279 /* Rotate ws so that the current windowbase is at bit0. */
280 /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
281
bc5378fc
MF
282 rsr a2, windowbase # don't need to save these, we only
283 rsr a3, windowstart # need shifted windowstart: windowmask
5a0015d6
CZ
284 ssr a2 # SAR = WINDOWBASE (rotation amount)
285 slli a2, a3, 32-WSBITS
286 src a2, a3, a2 # funnel shift: rotate WS right by SAR
287 srli a2, a2, 32-WSBITS
288 s32i a2, a1, PT_WMASK # needed for kernel_exception_exit
289
290 /* Save only the live window-frame */
291
292 _bbsi.l a2, 1, 1f
293 s32i a4, a1, PT_AREG4
294 s32i a5, a1, PT_AREG5
295 s32i a6, a1, PT_AREG6
296 s32i a7, a1, PT_AREG7
297 _bbsi.l a2, 2, 1f
298 s32i a8, a1, PT_AREG8
299 s32i a9, a1, PT_AREG9
300 s32i a10, a1, PT_AREG10
301 s32i a11, a1, PT_AREG11
302 _bbsi.l a2, 3, 1f
303 s32i a12, a1, PT_AREG12
304 s32i a13, a1, PT_AREG13
305 s32i a14, a1, PT_AREG14
306 s32i a15, a1, PT_AREG15
307
3081:
309
310#ifdef KERNEL_STACK_OVERFLOW_CHECK
311
312 /* Stack overflow check, for debugging */
313 extui a2, a1, TASK_SIZE_BITS,XX
314 movi a3, SIZE??
315 _bge a2, a3, out_of_stack_panic
316
317#endif
318
319/*
320 * This is the common exception handler.
321 * We get here from the user exception handler or simply by falling through
322 * from the kernel exception handler.
323 * Save the remaining special registers, switch to kernel mode, and jump
324 * to the second-level exception handler.
325 *
326 */
327
328common_exception:
329
29c4dfd9 330 /* Save some registers, disable loops and clear the syscall flag. */
5a0015d6 331
bc5378fc
MF
332 rsr a2, debugcause
333 rsr a3, epc1
5a0015d6
CZ
334 s32i a2, a1, PT_DEBUGCAUSE
335 s32i a3, a1, PT_PC
336
29c4dfd9 337 movi a2, -1 # PT_SYSCALL = -1: not a syscall frame
bc5378fc 338 rsr a3, excvaddr
29c4dfd9 339 s32i a2, a1, PT_SYSCALL
5a0015d6
CZ
340 movi a2, 0
341 s32i a3, a1, PT_EXCVADDR
bc5378fc 342 xsr a2, lcount # disable loops (LCOUNT = 0)
5a0015d6
CZ
343 s32i a2, a1, PT_LCOUNT
344
345 /* It is now safe to restore the EXC_TABLE_FIXUP variable. */
346
bc5378fc 347 rsr a0, exccause
5a0015d6 348 movi a3, 0
bc5378fc 349 rsr a2, excsave1
5a0015d6
CZ
350 s32i a0, a1, PT_EXCCAUSE
351 s32i a3, a2, EXC_TABLE_FIXUP
352
353 /* All unrecoverable states are saved on stack, now, and a1 is valid,
354 * so we can allow exceptions and interrupts (*) again.
355 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
356 *
895666a9
MF
357 * (*) We only allow interrupts if they were previously enabled and
358 * we're not handling an IRQ
5a0015d6
CZ
359 */
360
bc5378fc 361 rsr a3, ps
895666a9
MF
362 addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT
363 movi a2, LOCKLEVEL
2d1c645c
MG
364 extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
365 # a3 = PS.INTLEVEL
895666a9 366 moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt
173d6681 367 movi a2, 1 << PS_WOE_BIT
5a0015d6 368 or a3, a3, a2
bc5378fc
MF
369 rsr a0, exccause
370 xsr a3, ps # switch PS; old PS value left in a3
5a0015d6
CZ
371
372 s32i a3, a1, PT_PS # save ps
373
bc5378fc 374 /* Save lbeg, lend */
5a0015d6 375
bc5378fc
MF
376 rsr a2, lbeg
377 rsr a3, lend
5a0015d6
CZ
378 s32i a2, a1, PT_LBEG
379 s32i a3, a1, PT_LEND
380
733536b8
MF
381 /* Save SCOMPARE1 */
382
383#if XCHAL_HAVE_S32C1I
384 rsr a2, scompare1
385 s32i a2, a1, PT_SCOMPARE1
386#endif
387
c658eac6
CZ
388 /* Save optional registers. */
389
390 save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
391
c92931b2
MF
392#ifdef CONFIG_TRACE_IRQFLAGS
393 l32i a4, a1, PT_DEPC
394 /* Double exception means we came here with an exception
395 * while PS.EXCM was set, i.e. interrupts disabled.
396 */
397 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
398 l32i a4, a1, PT_EXCCAUSE
399 bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
400 /* We came here with an interrupt means interrupts were enabled
401 * and we've just disabled them.
402 */
403 movi a4, trace_hardirqs_off
404 callx4 a4
4051:
406#endif
407
5a0015d6
CZ
408 /* Go to second-level dispatcher. Set up parameters to pass to the
409 * exception handler and call the exception handler.
410 */
411
412 movi a4, exc_table
413 mov a6, a1 # pass stack frame
414 mov a7, a0 # pass EXCCAUSE
415 addx4 a4, a0, a4
416 l32i a4, a4, EXC_TABLE_DEFAULT # load handler
417
418 /* Call the second-level handler */
419
420 callx4 a4
421
422 /* Jump here for exception exit */
e6ffe17e 423 .global common_exception_return
5a0015d6
CZ
424common_exception_return:
425
c92931b2 4261:
aea8e7c8 427 rsil a2, LOCKLEVEL # mask interrupts before testing flags
c92931b2 428
5a0015d6
CZ
429 /* Jump if we are returning from kernel exceptions. */
430
aea8e7c8 431 l32i a3, a1, PT_PS
e1088430 432 _bbci.l a3, PS_UM_BIT, 4f
5a0015d6
CZ
433
434 /* Specific to a user exception exit:
435 * We need to check some flags for signal handling and rescheduling,
436 * and have to restore WB and WS, extra states, and all registers
437 * in the register file that were in use in the user task.
e1088430 438 * Note that we don't disable interrupts here.
5a0015d6
CZ
439 */
440
441 GET_THREAD_INFO(a2,a1)
442 l32i a4, a2, TI_FLAGS
443
5a0015d6 444 _bbsi.l a4, TIF_NEED_RESCHED, 3f
a53bb24e 445 _bbsi.l a4, TIF_NOTIFY_RESUME, 2f
a99e07ee 446 _bbci.l a4, TIF_SIGPENDING, 5f
5a0015d6 447
a53bb24e 4482: l32i a4, a1, PT_DEPC # signal / notify-resume work
5a0015d6 449 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
5a0015d6 450
e1088430
CZ
451 /* Call do_signal() */
452
aea8e7c8 453 rsil a2, 0 # reenable interrupts for the C call
a53bb24e 454 movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*)
5a0015d6 455 mov a6, a1
5a0015d6
CZ
456 callx4 a4
457 j 1b # re-check flags after the call
458
e1088430 4593: /* Reschedule */
5a0015d6 460
aea8e7c8 461 rsil a2, 0 # reenable interrupts for the C call
5a0015d6
CZ
462 movi a4, schedule # void schedule (void)
463 callx4 a4
464 j 1b # re-check flags after the call
465
a99e07ee
MF
4665: # no user work pending
467#ifdef CONFIG_DEBUG_TLB_SANITY
468 l32i a4, a1, PT_DEPC
469 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
470 movi a4, check_tlb_sanity
471 callx4 a4
472#endif
aea8e7c8
MF
4734: # common restore path (also kernel-mode exit)
474#ifdef CONFIG_TRACE_IRQFLAGS
475 l32i a4, a1, PT_DEPC
476 /* Double exception means we came here with an exception
477 * while PS.EXCM was set, i.e. interrupts disabled.
478 */
479 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
480 l32i a4, a1, PT_EXCCAUSE
481 bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
482 /* We came here with an interrupt means interrupts were enabled
483 * and we'll reenable them on return.
484 */
485 movi a4, trace_hardirqs_on
486 callx4 a4
4871:
488#endif
489 /* Restore optional registers. */
e1088430
CZ
490
491 load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
5a0015d6 492
733536b8
MF
493 /* Restore SCOMPARE1 */
494
495#if XCHAL_HAVE_S32C1I
496 l32i a2, a1, PT_SCOMPARE1
497 wsr a2, scompare1
498#endif
bc5378fc 499 wsr a3, ps /* disable interrupts */
e1088430
CZ
500
501 _bbci.l a3, PS_UM_BIT, kernel_exception_exit
502
503user_exception_exit:
504
505 /* Restore the state of the task and return from the exception. */
5a0015d6 506
5a0015d6
CZ
507 /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
508
509 l32i a2, a1, PT_WINDOWBASE
510 l32i a3, a1, PT_WINDOWSTART
bc5378fc
MF
511 wsr a1, depc # use DEPC as temp storage
512 wsr a3, windowstart # restore WINDOWSTART
5a0015d6 513 ssr a2 # preserve user's WB in the SAR
bc5378fc 514 wsr a2, windowbase # switch to user's saved WB
5a0015d6 515 rsync
bc5378fc 516 rsr a1, depc # restore stack pointer
5a0015d6
CZ
517 l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9)
518 rotw -1 # we restore a4..a7
519 _bltui a6, 16, 1f # only have to restore current window?
520
521 /* The working registers are a0 and a3. We are restoring to
522 * a4..a7. Be careful not to destroy what we have just restored.
523 * Note: wmask has the format YYYYM:
524 * Y: number of registers saved in groups of 4
525 * M: 4 bit mask of first 16 registers
526 */
527
528 mov a2, a6
529 mov a3, a5
530
5312: rotw -1 # a0..a3 become a4..a7
532 addi a3, a7, -4*4 # next iteration
533 addi a2, a6, -16 # decrementing Y in WMASK
534 l32i a4, a3, PT_AREG_END + 0
535 l32i a5, a3, PT_AREG_END + 4
536 l32i a6, a3, PT_AREG_END + 8
537 l32i a7, a3, PT_AREG_END + 12
538 _bgeui a2, 16, 2b
539
540 /* Clear unrestored registers (don't leak anything to user-land) */
541
bc5378fc
MF
5421: rsr a0, windowbase
543 rsr a3, sar
5a0015d6
CZ
544 sub a3, a0, a3 # frames rotated since SAR was set
545 beqz a3, 2f
546 extui a3, a3, 0, WBBITS
547
5481: rotw -1
549 addi a3, a7, -1
550 movi a4, 0
551 movi a5, 0
552 movi a6, 0
553 movi a7, 0
554 bgei a3, 1, 1b
555
556 /* We are back where we were when we started.
557 * Note: a2 still contains WMASK (if we've returned to the original
558 * frame where we had loaded a2), or at least the lower 4 bits
559 * (if we have restored WSBITS-1 frames).
560 */
561
c50842df
CZ
562#if XCHAL_HAVE_THREADPTR
563 l32i a3, a1, PT_THREADPTR
564 wur a3, threadptr
565#endif
566
5a0015d6
CZ
5672: j common_exception_exit
568
569 /* This is the kernel exception exit.
570 * We avoided doing a MOVSP when we entered the exception, but we
571 * have to do it here.
572 */
573
574kernel_exception_exit:
575
5a0015d6
CZ
576#ifdef PREEMPTIBLE_KERNEL
577
578#ifdef CONFIG_PREEMPT
579
580 /*
581 * Note: We've just returned from a call4, so we have
582 * at least 4 addt'l regs.
583 */
584
585 /* Check current_thread_info->preempt_count */
586
587 GET_THREAD_INFO(a2)
588 l32i a3, a2, TI_PREEMPT
589 bnez a3, 1f
590
591 l32i a2, a2, TI_FLAGS
592
5931:
594
595#endif
596
597#endif
598
599 /* Check if we have to do a movsp.
600 *
601 * We only have to do a movsp if the previous window-frame has
602 * been spilled to the *temporary* exception stack instead of the
603 * task's stack. This is the case if the corresponding bit in
604 * WINDOWSTART for the previous window-frame was set before
605 * (not spilled) but is zero now (spilled).
606 * If this bit is zero, all other bits except the one for the
607 * current window frame are also zero. So, we can use a simple test:
608 * 'and' WINDOWSTART and WINDOWSTART-1:
609 *
610 * (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
611 *
612 * The result is zero only if one bit was set.
613 *
614 * (Note: We might have gone through several task switches before
615 * we come back to the current task, so WINDOWBASE might be
616 * different from the time the exception occurred.)
617 */
618
619 /* Test WINDOWSTART before and after the exception.
620 * We actually have WMASK, so we only have to test if it is 1 or not.
621 */
622
623 l32i a2, a1, PT_WMASK
624 _beqi a2, 1, common_exception_exit # Spilled before exception,jump
625
626 /* Test WINDOWSTART now. If spilled, do the movsp */
627
bc5378fc 628 rsr a3, windowstart
5a0015d6
CZ
629 addi a0, a3, -1
630 and a3, a3, a0 # zero iff exactly one WS bit set
631 _bnez a3, common_exception_exit
632
633 /* Do a movsp (we returned from a call4, so we have at least a0..a7) */
634
635 addi a0, a1, -16
636 l32i a3, a0, 0
637 l32i a4, a0, 4
638 s32i a3, a1, PT_SIZE+0
639 s32i a4, a1, PT_SIZE+4
640 l32i a3, a0, 8
641 l32i a4, a0, 12
642 s32i a3, a1, PT_SIZE+8
643 s32i a4, a1, PT_SIZE+12
644
645 /* Common exception exit.
646 * We restore the special register and the current window frame, and
647 * return from the exception.
648 *
649 * Note: We expect a2 to hold PT_WMASK
650 */
651
652common_exception_exit:
653
c658eac6
CZ
654 /* Restore address registers. */
655
5a0015d6
CZ
656 _bbsi.l a2, 1, 1f
657 l32i a4, a1, PT_AREG4
658 l32i a5, a1, PT_AREG5
659 l32i a6, a1, PT_AREG6
660 l32i a7, a1, PT_AREG7
661 _bbsi.l a2, 2, 1f
662 l32i a8, a1, PT_AREG8
663 l32i a9, a1, PT_AREG9
664 l32i a10, a1, PT_AREG10
665 l32i a11, a1, PT_AREG11
666 _bbsi.l a2, 3, 1f
667 l32i a12, a1, PT_AREG12
668 l32i a13, a1, PT_AREG13
669 l32i a14, a1, PT_AREG14
670 l32i a15, a1, PT_AREG15
671
672 /* Restore PC, SAR */
673
6741: l32i a2, a1, PT_PC
675 l32i a3, a1, PT_SAR
bc5378fc
MF
676 wsr a2, epc1
677 wsr a3, sar
5a0015d6
CZ
678
679 /* Restore LBEG, LEND, LCOUNT */
680
681 l32i a2, a1, PT_LBEG
682 l32i a3, a1, PT_LEND
bc5378fc 683 wsr a2, lbeg
5a0015d6 684 l32i a2, a1, PT_LCOUNT
bc5378fc
MF
685 wsr a3, lend
686 wsr a2, lcount
5a0015d6 687
29c4dfd9
CZ
688 /* We control single stepping through the ICOUNTLEVEL register. */
689
690 l32i a2, a1, PT_ICOUNTLEVEL
691 movi a3, -2
bc5378fc
MF
692 wsr a2, icountlevel
693 wsr a3, icount
29c4dfd9 694
5a0015d6
CZ
695 /* Check if it was double exception. */
696
697 l32i a0, a1, PT_DEPC
698 l32i a3, a1, PT_AREG3
699 l32i a2, a1, PT_AREG2
895666a9 700 _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
5a0015d6
CZ
701
702 /* Restore a0...a3 and return */
703
704 l32i a0, a1, PT_AREG0
705 l32i a1, a1, PT_AREG1
895666a9 706 rfe
5a0015d6 707
895666a9 7081: wsr a0, depc # double exception: restore DEPC first
5a0015d6
CZ
709 l32i a0, a1, PT_AREG0
710 l32i a1, a1, PT_AREG1
895666a9 711 rfde
5a0015d6 712
d1538c46
CZ
713ENDPROC(kernel_exception)
714
5a0015d6
CZ
715/*
716 * Debug exception handler.
717 *
718 * Currently, we don't support KGDB, so only user application can be debugged.
719 *
720 * When we get here, a0 is trashed and saved to excsave[debuglevel]
721 */
722
723ENTRY(debug_exception)
724
bc5378fc 725 rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL
173d6681 726 bbsi.l a0, PS_EXCM_BIT, 1f # exception mode
5a0015d6 727
bc5378fc 728 /* Set EPC1 and EXCCAUSE */
5a0015d6 729
bc5378fc
MF
730 wsr a2, depc # save a2 temporarily
731 rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL
732 wsr a2, epc1
5a0015d6
CZ
733
734 movi a2, EXCCAUSE_MAPPED_DEBUG
bc5378fc 735 wsr a2, exccause
5a0015d6
CZ
736
737 /* Restore PS to the value before the debug exc but with PS.EXCM set.*/
738
173d6681 739 movi a2, 1 << PS_EXCM_BIT
5a0015d6
CZ
740 or a2, a0, a2
741 movi a0, debug_exception # restore a3, debug jump vector
bc5378fc
MF
742 wsr a2, ps
743 xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
5a0015d6
CZ
744
745 /* Switch to kernel/user stack, restore jump vector, and save a0 */
746
173d6681 747 bbsi.l a2, PS_UM_BIT, 2f # jump if user mode
5a0015d6
CZ
748
749 addi a2, a1, -16-PT_SIZE # assume kernel stack
750 s32i a0, a2, PT_AREG0
751 movi a0, 0
752 s32i a1, a2, PT_AREG1
753 s32i a0, a2, PT_DEPC # mark it as a regular exception
bc5378fc 754 xsr a0, depc
5a0015d6
CZ
755 s32i a3, a2, PT_AREG3
756 s32i a0, a2, PT_AREG2
757 mov a1, a2
758 j _kernel_exception
759
bc5378fc 7602: rsr a2, excsave1 # user mode: use kernel stack from table
5a0015d6
CZ
761 l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer
762 s32i a0, a2, PT_AREG0
763 movi a0, 0
764 s32i a1, a2, PT_AREG1
765 s32i a0, a2, PT_DEPC
bc5378fc 766 xsr a0, depc
5a0015d6
CZ
767 s32i a3, a2, PT_AREG3
768 s32i a0, a2, PT_AREG2
769 mov a1, a2
770 j _user_exception
771
772 /* Debug exception while in exception mode. */
7731: j 1b // FIXME!!
774
d1538c46 775ENDPROC(debug_exception)
5a0015d6
CZ
776
777/*
778 * We get here in case of an unrecoverable exception.
779 * The only thing we can do is to be nice and print a panic message.
780 * We only produce a single stack frame for panic, so ???
781 *
782 *
783 * Entry conditions:
784 *
785 * - a0 contains the caller address; original value saved in excsave1.
786 * - the original a0 contains a valid return address (backtrace) or 0.
787 * - a2 contains a valid stackpointer
788 *
789 * Notes:
790 *
791 * - If the stack pointer could be invalid, the caller has to setup a
792 * dummy stack pointer (e.g. the stack of the init_task)
793 *
794 * - If the return address could be invalid, the caller has to set it
795 * to 0, so the backtrace would stop.
796 *
797 */
798 .align 4
799unrecoverable_text:
800 .ascii "Unrecoverable error in exception handler\0"
801
802ENTRY(unrecoverable_exception)
803
804 movi a0, 1
805 movi a1, 0
806
bc5378fc
MF
807 wsr a0, windowstart # reset window state: only frame 0 live
808 wsr a1, windowbase
5a0015d6
CZ
809 rsync
810
2d1c645c 811 movi a1, (1 << PS_WOE_BIT) | LOCKLEVEL
bc5378fc 812 wsr a1, ps # window overflow enabled, irqs masked
5a0015d6
CZ
813 rsync
814
815 movi a1, init_task
816 movi a0, 0
817 addi a1, a1, PT_REGS_OFFSET # use init_task's stack as a known-good SP
818
819 movi a4, panic
820 movi a6, unrecoverable_text
821
822 callx4 a4
823
8241: j 1b # panic should not return; spin if it does
825
d1538c46 826ENDPROC(unrecoverable_exception)
5a0015d6
CZ
827
828/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
829
830/*
831 * Fast-handler for alloca exceptions
832 *
833 * The ALLOCA handler is entered when user code executes the MOVSP
834 * instruction and the caller's frame is not in the register file.
835 * In this case, the caller frame's a0..a3 are on the stack just
836 * below sp (a1), and this handler moves them.
837 *
838 * For "MOVSP <ar>,<as>" without destination register a1, this routine
839 * simply moves the value from <as> to <ar> without moving the save area.
840 *
841 * Entry condition:
842 *
843 * a0: trashed, original value saved on stack (PT_AREG0)
844 * a1: a1
845 * a2: new stack pointer, original in DEPC
846 * a3: dispatch table
847 * depc: a2, original value saved on stack (PT_DEPC)
848 * excsave_1: a3
849 *
850 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
851 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
852 */
853
854#if XCHAL_HAVE_BE
855#define _EXTUI_MOVSP_SRC(ar) extui ar, ar, 4, 4
856#define _EXTUI_MOVSP_DST(ar) extui ar, ar, 0, 4
857#else
858#define _EXTUI_MOVSP_SRC(ar) extui ar, ar, 0, 4
859#define _EXTUI_MOVSP_DST(ar) extui ar, ar, 4, 4
860#endif
861
862ENTRY(fast_alloca)
863
864 /* We shouldn't be in a double exception. */
865
866 l32i a0, a2, PT_DEPC
867 _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double
868
bc5378fc 869 rsr a0, depc # get a2
5a0015d6
CZ
870 s32i a4, a2, PT_AREG4 # save a4 and
871 s32i a0, a2, PT_AREG2 # a2 to stack
872
873 /* Exit critical section. */
874
875 movi a0, 0
876 s32i a0, a3, EXC_TABLE_FIXUP
877
878 /* Restore a3, excsave_1 */
879
bc5378fc
MF
880 xsr a3, excsave1 # make sure excsave_1 is valid for dbl.
881 rsr a4, epc1 # get exception address
5a0015d6
CZ
882 s32i a3, a2, PT_AREG3 # save a3 to stack
883
884#ifdef ALLOCA_EXCEPTION_IN_IRAM
885#error iram not supported
886#else
887 /* Note: l8ui not allowed in IRAM/IROM!! */
888 l8ui a0, a4, 1 # read as(src) from MOVSP instruction
889#endif
890 movi a3, .Lmovsp_src
891 _EXTUI_MOVSP_SRC(a0) # extract source register number
892 addx8 a3, a0, a3 # index the 8-byte dispatch table
893 jx a3
894
895.Lunhandled_double:
bc5378fc 896 wsr a0, excsave1
5a0015d6
CZ
897 movi a0, unrecoverable_exception
898 callx0 a0
899
900 .align 8
901.Lmovsp_src: # jump table: a3 = value of src reg <as>
902 l32i a3, a2, PT_AREG0; _j 1f; .align 8
903 mov a3, a1; _j 1f; .align 8
904 l32i a3, a2, PT_AREG2; _j 1f; .align 8
905 l32i a3, a2, PT_AREG3; _j 1f; .align 8
906 l32i a3, a2, PT_AREG4; _j 1f; .align 8
907 mov a3, a5; _j 1f; .align 8
908 mov a3, a6; _j 1f; .align 8
909 mov a3, a7; _j 1f; .align 8
910 mov a3, a8; _j 1f; .align 8
911 mov a3, a9; _j 1f; .align 8
912 mov a3, a10; _j 1f; .align 8
913 mov a3, a11; _j 1f; .align 8
914 mov a3, a12; _j 1f; .align 8
915 mov a3, a13; _j 1f; .align 8
916 mov a3, a14; _j 1f; .align 8
917 mov a3, a15; _j 1f; .align 8
918
9191:
920
921#ifdef ALLOCA_EXCEPTION_IN_IRAM
922#error iram not supported
923#else
924 l8ui a0, a4, 0 # read ar(dst) from MOVSP instruction
925#endif
926 addi a4, a4, 3 # step over movsp
927 _EXTUI_MOVSP_DST(a0) # extract destination register
bc5378fc 928 wsr a4, epc1 # save new epc_1
5a0015d6
CZ
929
930 _bnei a0, 1, 1f # no 'movsp a1, ax': jump
931
c4c4594b 932 /* Move the save area. This implies the use of the L32E
5a0015d6
CZ
933 * and S32E instructions, because this move must be done with
934 * the user's PS.RING privilege levels, not with ring 0
935 * (kernel's) privileges currently active with PS.EXCM
936 * set. Note that we have still registered a fixup routine with the
937 * double exception vector in case a double exception occurs.
938 */
939
940 /* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */
941
942 l32e a0, a1, -16
943 l32e a4, a1, -12
944 s32e a0, a3, -16
945 s32e a4, a3, -12
946 l32e a0, a1, -8
947 l32e a4, a1, -4
948 s32e a0, a3, -8
949 s32e a4, a3, -4
950
951 /* Restore stack-pointer and all the other saved registers. */
952
953 mov a1, a3
954
955 l32i a4, a2, PT_AREG4
956 l32i a3, a2, PT_AREG3
957 l32i a0, a2, PT_AREG0
958 l32i a2, a2, PT_AREG2
959 rfe
960
961 /* MOVSP <at>,<as> was invoked with <at> != a1.
962 * Because the stack pointer is not being modified,
963 * we should be able to just modify the pointer
964 * without moving any save area.
965 * The processor only traps these occurrences if the
966 * caller window isn't live, so unfortunately we can't
967 * use this as an alternate trap mechanism.
968 * So we just do the move. This requires that we
969 * resolve the destination register, not just the source,
970 * so there's some extra work.
971 * (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
972 */
973
974 /* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */
9761: movi a4, .Lmovsp_dst
977 addx8 a4, a0, a4
978 jx a4
979
980 .align 8
981.Lmovsp_dst: # jump table: store a3 into dst reg <at>
982 s32i a3, a2, PT_AREG0; _j 1f; .align 8
983 mov a1, a3; _j 1f; .align 8
984 s32i a3, a2, PT_AREG2; _j 1f; .align 8
985 s32i a3, a2, PT_AREG3; _j 1f; .align 8
986 s32i a3, a2, PT_AREG4; _j 1f; .align 8
987 mov a5, a3; _j 1f; .align 8
988 mov a6, a3; _j 1f; .align 8
989 mov a7, a3; _j 1f; .align 8
990 mov a8, a3; _j 1f; .align 8
991 mov a9, a3; _j 1f; .align 8
992 mov a10, a3; _j 1f; .align 8
993 mov a11, a3; _j 1f; .align 8
994 mov a12, a3; _j 1f; .align 8
995 mov a13, a3; _j 1f; .align 8
996 mov a14, a3; _j 1f; .align 8
997 mov a15, a3; _j 1f; .align 8
998
9991: l32i a4, a2, PT_AREG4
1000 l32i a3, a2, PT_AREG3
1001 l32i a0, a2, PT_AREG0
1002 l32i a2, a2, PT_AREG2
1003 rfe
1004
d1538c46 1005ENDPROC(fast_alloca)
5a0015d6
CZ
1006
1007/*
1008 * fast system calls.
1009 *
1010 * WARNING: The kernel doesn't save the entire user context before
1011 * handling a fast system call. These functions are small and short,
1012 * usually offering some functionality not available to user tasks.
1013 *
1014 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
1015 *
1016 * Entry condition:
1017 *
1018 * a0: trashed, original value saved on stack (PT_AREG0)
1019 * a1: a1
1020 * a2: new stack pointer, original in DEPC
1021 * a3: dispatch table
1022 * depc: a2, original value saved on stack (PT_DEPC)
1023 * excsave_1: a3
1024 */
1025
1026ENTRY(fast_syscall_kernel)
1027
1028 /* Skip syscall. */
1029
bc5378fc 1030 rsr a0, epc1
5a0015d6 1031 addi a0, a0, 3 # advance PC past the 3-byte syscall insn
bc5378fc 1032 wsr a0, epc1
5a0015d6
CZ
1033
1034 l32i a0, a2, PT_DEPC
1035 bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
1036
bc5378fc 1037 rsr a0, depc # get syscall-nr
5a0015d6 1038 _beqz a0, fast_syscall_spill_registers
fc4fb2ad 1039 _beqi a0, __NR_xtensa, fast_syscall_xtensa
5a0015d6
CZ
1040
1041 j kernel_exception # not a fast syscall: full path
1042
d1538c46
CZ
1043ENDPROC(fast_syscall_kernel)
1044
5a0015d6
CZ
1045ENTRY(fast_syscall_user)
1046
1047 /* Skip syscall. */
1048
bc5378fc 1049 rsr a0, epc1
5a0015d6 1050 addi a0, a0, 3 # advance PC past the 3-byte syscall insn
bc5378fc 1051 wsr a0, epc1
5a0015d6
CZ
1052
1053 l32i a0, a2, PT_DEPC
1054 bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
1055
bc5378fc 1056 rsr a0, depc # get syscall-nr
5a0015d6 1057 _beqz a0, fast_syscall_spill_registers
fc4fb2ad 1058 _beqi a0, __NR_xtensa, fast_syscall_xtensa
5a0015d6
CZ
1059
1060 j user_exception # not a fast syscall: full path
1061
d1538c46
CZ
1062ENDPROC(fast_syscall_user)
1063
5a0015d6
CZ
1064ENTRY(fast_syscall_unrecoverable)
1065
c4c4594b 1066 /* Restore all states. */
5a0015d6 1067
c4c4594b
CZ
1068 l32i a0, a2, PT_AREG0 # restore a0
1069 xsr a2, depc # restore a2, depc
1070 rsr a3, excsave1
5a0015d6 1071
c4c4594b
CZ
1072 wsr a0, excsave1
1073 movi a0, unrecoverable_exception
1074 callx0 a0 # does not return
5a0015d6 1075
d1538c46 1076ENDPROC(fast_syscall_unrecoverable)
5a0015d6
CZ
1077
1078/*
1079 * sysxtensa syscall handler
1080 *
fc4fb2ad
CZ
1081 * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
1082 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
1083 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
1084 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
1085 * a2 a6 a3 a4 a5
5a0015d6
CZ
1086 *
1087 * Entry condition:
1088 *
fc4fb2ad 1089 * a0: a2 (syscall-nr), original value saved on stack (PT_AREG0)
5a0015d6 1090 * a1: a1
fc4fb2ad
CZ
1091 * a2: new stack pointer, original in a0 and DEPC
1092 * a3: dispatch table, original in excsave_1
1093 * a4..a15: unchanged
5a0015d6
CZ
1094 * depc: a2, original value saved on stack (PT_DEPC)
1095 * excsave_1: a3
1096 *
1097 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1098 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1099 *
1100 * Note: we don't have to save a2; a2 holds the return value
1101 *
1102 * We use the two macros TRY and CATCH:
1103 *
1104 * TRY adds an entry to the __ex_table fixup table for the immediately
1105 * following instruction.
1106 *
25985edc 1107 * CATCH catches any exception that occurred at one of the preceding TRY
5a0015d6
CZ
1108 * statements and continues from there
1109 *
1110 * Usage TRY l32i a0, a1, 0
1111 * <other code>
1112 * done: rfe
1113 * CATCH <set return code>
1114 * j done
1115 */
1116
/* TRY records an __ex_table entry mapping the next instruction (label 66)
 * to the fixup location (label 67, placed by CATCH).
 */

#define TRY \
	.section __ex_table, "a"; \
	.word	66f, 67f; \
	.text; \
66:

#define CATCH \
67:
1125
/* sysxtensa fast atomic operations (set/add/exg_add/cmp_swp) on a user
 * word.  a6 = sub-opcode, a3 = user pointer, a4/a5 = operands.
 * Returns result in a2; -EFAULT on bad pointer, -EINVAL on bad opcode.
 */

ENTRY(fast_syscall_xtensa)

	xsr	a3, excsave1		# restore a3, excsave1

	s32i	a7, a2, PT_AREG7	# we need an additional register
	movi	a7, 4			# sizeof(unsigned int)
	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp

	addi	a6, a6, -1		# assuming SYS_XTENSA_ATOMIC_SET = 1
	_bgeui	a6, SYS_XTENSA_COUNT - 1, .Lill
	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp

	/* Fall through for ATOMIC_CMP_SWP. */

.Lswp:	/* Atomic compare and swap */

TRY	l32i	a0, a3, 0		# read old value
	bne	a0, a4, 1f		# same as old value? jump
TRY	s32i	a5, a3, 0		# different, modify value
	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 1			# and return 1
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

1:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 0			# return 0 (note that we cannot set
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

.Lnswp:	/* Atomic set, add, and exg_add. */

TRY	l32i	a7, a3, 0		# orig
	add	a0, a4, a7		# + arg
	moveqz	a0, a4, a6		# set
TRY	s32i	a0, a3, 0		# write new value

	mov	a0, a2
	mov	a2, a7			# return the original value
	l32i	a7, a0, PT_AREG7	# restore a7
	l32i	a0, a0, PT_AREG0	# restore a0
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

CATCH
.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EFAULT
	rfe

	/* Fix: restore a7 from PT_AREG7, not PT_AREG0, so an invalid
	 * opcode does not corrupt the caller's a7.
	 */

.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EINVAL
	rfe

ENDPROC(fast_syscall_xtensa)
5a0015d6
CZ
1183
1184
1185/* fast_syscall_spill_registers.
1186 *
1187 * Entry condition:
1188 *
1189 * a0: trashed, original value saved on stack (PT_AREG0)
1190 * a1: a1
1191 * a2: new stack pointer, original in DEPC
1192 * a3: dispatch table
1193 * depc: a2, original value saved on stack (PT_DEPC)
1194 * excsave_1: a3
1195 *
1196 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
5a0015d6
CZ
1197 */
1198
ENTRY(fast_syscall_spill_registers)

	/* Register a FIXUP handler (pass current wb as a parameter) */

	movi	a0, fast_syscall_spill_registers_fixup
	s32i	a0, a3, EXC_TABLE_FIXUP
	rsr	a0, windowbase
	s32i	a0, a3, EXC_TABLE_PARAM

	/* Save a3 and SAR on stack. */

	rsr	a0, sar
	xsr	a3, excsave1		# restore a3 and excsave_1
	s32i	a3, a2, PT_AREG3
	s32i	a4, a2, PT_AREG4
	s32i	a0, a2, PT_AREG5	# store SAR to PT_AREG5

	/* The spill routine might clobber a7, a11, and a15. */

	s32i	a7, a2, PT_AREG7
	s32i	a11, a2, PT_AREG11
	s32i	a15, a2, PT_AREG15

	call0	_spill_registers	# destroys a3, a4, and SAR

	/* Advance PC, restore registers and SAR, and return from exception. */

	l32i	a3, a2, PT_AREG5	# SAR was stashed in PT_AREG5 above
	l32i	a4, a2, PT_AREG4
	l32i	a0, a2, PT_AREG0
	wsr	a3, sar
	l32i	a3, a2, PT_AREG3

	/* Restore clobbered registers. */

	l32i	a7, a2, PT_AREG7
	l32i	a11, a2, PT_AREG11
	l32i	a15, a2, PT_AREG15

	movi	a2, 0			# syscall result: success
	rfe

ENDPROC(fast_syscall_spill_registers)
1242
5a0015d6
CZ
1243/* Fixup handler.
1244 *
1245 * We get here if the spill routine causes an exception, e.g. tlb miss.
1246 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
1247 * we entered the spill routine and jump to the user exception handler.
1248 *
1249 * a0: value of depc, original value in depc
1250 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
1251 * a3: exctable, original value in excsave1
1252 */
1253
fast_syscall_spill_registers_fixup:

	rsr	a2, windowbase	# get current windowbase (a2 is saved)
	xsr	a0, depc	# restore depc and a0
	ssl	a2		# set shift (32 - WB)

	/* We need to make sure the current registers (a0-a3) are preserved.
	 * To do this, we simply set the bit for the current window frame
	 * in WS, so that the exception handlers save them to the task stack.
	 */

	rsr	a3, excsave1	# get spill-mask
	slli	a2, a3, 1	# shift left by one

	slli	a3, a2, 32-WSBITS
	src	a2, a2, a3	# a2 = xxwww1yyxxxwww1yy......
	wsr	a2, windowstart	# set corrected windowstart

	movi	a3, exc_table
	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# restore a2
	l32i	a3, a3, EXC_TABLE_PARAM	# original WB (in user task)

	/* Return to the original (user task) WINDOWBASE.
	 * We leave the following frame behind:
	 * a0, a1, a2	same
	 * a3:		trashed (saved in excsave_1)
	 * depc:	depc (we have to return to that address)
	 * excsave_1:	a3
	 */

	wsr	a3, windowbase
	rsync

	/* We are now in the original frame when we entered _spill_registers:
	 *  a0: return address
	 *  a1: used, stack pointer
	 *  a2: kernel stack pointer
	 *  a3: available, saved in EXCSAVE_1
	 *  depc: exception address
	 *  excsave: a3
	 * Note: This frame might be the same as above.
	 */

	/* Setup stack pointer. */

	addi	a2, a2, -PT_USER_SIZE
	s32i	a0, a2, PT_AREG0

	/* Make sure we return to this fixup handler. */

	movi	a3, fast_syscall_spill_registers_fixup_return
	s32i	a3, a2, PT_DEPC		# setup depc

	/* Jump to the exception handler. */

	movi	a3, exc_table
	rsr	a0, exccause
	addx4	a0, a0, a3		# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
	jx	a0
5a0015d6
CZ
1314
fast_syscall_spill_registers_fixup_return:

	/* When we return here, all registers have been restored (a2: DEPC) */

	wsr	a2, depc		# exception address

	/* Re-register the fixup handler so a subsequent fault during the
	 * retried spill is caught again.
	 */

	xsr	a3, excsave1
	movi	a2, fast_syscall_spill_registers_fixup
	s32i	a2, a3, EXC_TABLE_FIXUP
	rsr	a2, windowbase
	s32i	a2, a3, EXC_TABLE_PARAM
	l32i	a2, a3, EXC_TABLE_KSTK

	/* Load WB at the time the exception occurred. */

	rsr	a3, sar			# WB is still in SAR
	neg	a3, a3
	wsr	a3, windowbase
	rsync

	/* Restore a3 and return. */

	movi	a3, exc_table
	xsr	a3, excsave1

	rfde
1343
1344
1345/*
1346 * spill all registers.
1347 *
1348 * This is not a real function. The following conditions must be met:
1349 *
1350 * - must be called with call0.
c658eac6 1351 * - uses a3, a4 and SAR.
5a0015d6
CZ
1352 * - the last 'valid' register of each frame are clobbered.
1353 * - the caller must have registered a fixup handler
1354 * (or be inside a critical section)
1355 * - PS_EXCM must be set (PS_WOE cleared?)
1356 */
1357
ENTRY(_spill_registers)

	/*
	 * Rotate ws so that the current windowbase is at bit 0.
	 * Assume ws = xxxwww1yy (www1 current window frame).
	 * Rotate ws right so that a4 = yyxxxwww1.
	 */

	rsr	a4, windowbase
	rsr	a3, windowstart	# a3 = xxxwww1yy
	ssr	a4		# holds WB
	slli	a4, a3, WSBITS
	or	a3, a3, a4	# a3 = xxxwww1yyxxxwww1yy
	srl	a3, a3		# a3 = 00xxxwww1yyxxxwww1

	/* We are done if there are no more than the current register frame. */

	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
	movi	a4, (1 << (WSBITS-1))
	_beqz	a3, .Lnospill	# only one active frame? jump

	/* We want 1 at the top, so that we return to the current windowbase */

	or	a3, a3, a4	# 1yyxxxwww

	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */

	wsr	a3, windowstart	# save shifted windowstart
	neg	a4, a3
	and	a3, a4, a3	# first bit set from right: 000010000

	ffs_ws	a4, a3		# a4: shifts to skip empty frames
	movi	a3, WSBITS
	sub	a4, a3, a4	# WSBITS-a4:number of 0-bits from right
	ssr	a4		# save in SAR for later.

	rsr	a3, windowbase
	add	a3, a3, a4
	wsr	a3, windowbase
	rsync

	rsr	a3, windowstart
	srl	a3, a3		# shift windowstart

	/* WB is now just one frame below the oldest frame in the register
	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
	   and WS differ by one 4-register frame. */

	/* Save frames. Depending what call was used (call4, call8, call12),
	 * we have to save 4, 8, or 12 registers.
	 */

	_bbsi.l	a3, 1, .Lc4
	_bbsi.l	a3, 2, .Lc8

	/* Special case: we have a call12-frame starting at a4. */

	_bbci.l	a3, 3, .Lc12	# bit 3 shouldn't be zero! (Jump to Lc12 first)

	s32e	a4, a1, -16	# a1 is valid with an empty spill area
	l32e	a4, a5, -12
	s32e	a8, a4, -48
	mov	a8, a4
	l32e	a4, a1, -16
	j	.Lc12c

.Lnospill:
	ret

.Lloop: _bbsi.l	a3, 1, .Lc4
	_bbci.l	a3, 2, .Lc12

.Lc8:	s32e	a4, a13, -16	# spill a call8 frame
	l32e	a4, a5, -12
	s32e	a8, a4, -32
	s32e	a5, a13, -12
	s32e	a6, a13, -8
	s32e	a7, a13, -4
	s32e	a9, a4, -28
	s32e	a10, a4, -24
	s32e	a11, a4, -20

	srli	a11, a3, 2	# shift windowbase by 2
	rotw	2
	_bnei	a3, 1, .Lloop

.Lexit: /* Done. Do the final rotation, set WS, and return. */

	rotw	1
	rsr	a3, windowbase
	ssl	a3
	movi	a3, 1
	sll	a3, a3
	wsr	a3, windowstart
	ret

.Lc4:	s32e	a4, a9, -16	# spill a call4 frame
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	srli	a7, a3, 1
	rotw	1
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 3 shouldn't be zero!

	/* 12-register frame (call12) */

	l32e	a2, a5, -12
	s32e	a8, a2, -48
	mov	a8, a2

.Lc12c: s32e	a9, a8, -44
	s32e	a10, a8, -40
	s32e	a11, a8, -36
	s32e	a12, a8, -32
	s32e	a13, a8, -28
	s32e	a14, a8, -24
	s32e	a15, a8, -20
	srli	a15, a3, 3

	/* The stack pointer for a4..a7 is out of reach, so we rotate the
	 * window, grab the stackpointer, and rotate back.
	 * Alternatively, we could also use the following approach, but that
	 * makes the fixup routine much more complicated:
	 * rotw	1
	 * s32e	a0, a13, -16
	 * ...
	 * rotw 2
	 */

	rotw	1
	mov	a5, a13
	rotw	-1

	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	rotw	3

	_beqi	a3, 1, .Lexit
	j	.Lloop

.Linvalid_mask:

	/* We get here because of an unrecoverable error in the window
	 * registers. If we are in user space, we kill the application,
	 * however, this condition is unrecoverable in kernel space.
	 */

	rsr	a0, ps
	_bbci.l	a0, PS_UM_BIT, 1f

	/* User space: Setup a dummy frame and kill application.
	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
	 */

	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync

	movi	a0, 0

	movi	a3, exc_table
	l32i	a1, a3, EXC_TABLE_KSTK
	wsr	a3, excsave1

	movi	a4, (1 << PS_WOE_BIT) | LOCKLEVEL
	wsr	a4, ps
	rsync

	movi	a6, SIGSEGV
	movi	a4, do_exit
	callx4	a4

1:	/* Kernel space: PANIC! */

	wsr	a0, excsave1
	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b

ENDPROC(_spill_registers)
1548
e5083a63 1549#ifdef CONFIG_MMU
5a0015d6
CZ
1550/*
1551 * We should never get here. Bail out!
1552 */
1553
ENTRY(fast_second_level_miss_double_kernel)

	/* Unreachable by design: panic and spin. */

1:	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b

ENDPROC(fast_second_level_miss_double_kernel)
1561
5a0015d6
CZ
1562/* First-level entry handler for user, kernel, and double 2nd-level
1563 * TLB miss exceptions. Note that for now, user and kernel miss
1564 * exceptions share the same entry point and are handled identically.
1565 *
1566 * An old, less-efficient C version of this function used to exist.
1567 * We include it below, interleaved as comments, for reference.
1568 *
1569 * Entry condition:
1570 *
1571 * a0: trashed, original value saved on stack (PT_AREG0)
1572 * a1: a1
1573 * a2: new stack pointer, original in DEPC
1574 * a3: dispatch table
1575 * depc: a2, original value saved on stack (PT_DEPC)
1576 * excsave_1: a3
1577 *
1578 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1579 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1580 */
1581
ENTRY(fast_second_level_miss)

	/* Save a1. Note: we don't expect a double exception. */

	s32i	a1, a2, PT_AREG1

	/* We need to map the page of PTEs for the user task. Find
	 * the pointer to that page. Also, it's possible for tsk->mm
	 * to be NULL while tsk->active_mm is nonzero if we faulted on
	 * a vmalloc address. In that rare case, we must use
	 * active_mm instead to avoid a fault in this handler. See
	 *
	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
	 * (or search Internet on "mm vs. active_mm")
	 *
	 * if (!mm)
	 *	mm = tsk->active_mm;
	 * pgd = pgd_offset (mm, regs->excvaddr);
	 * pmd = pmd_offset (pgd, regs->excvaddr);
	 * pmdval = *pmd;
	 */

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

	/* We deliberately destroy a3 that holds the exception table. */

8:	rsr	a3, excvaddr		# fault address
	_PGD_OFFSET(a0, a3, a1)
	l32i	a0, a0, 0		# read pmdval
	beqz	a0, 2f

	/* Read ptevaddr and convert to top of page-table page.
	 *
	 *	vpnval = read_ptevaddr_register() & PAGE_MASK;
	 *	vpnval += DTLB_WAY_PGTABLE;
	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
	 *	write_dtlb_entry (pteval, vpnval);
	 *
	 * The messy computation for 'pteval' above really simplifies
	 * into the following:
	 *
	 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
	 */

	movi	a1, (-PAGE_OFFSET) & 0xffffffff
	add	a0, a0, a1		# pmdval - PAGE_OFFSET
	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
	xor	a0, a0, a1

	movi	a1, _PAGE_DIRECTORY
	or	a0, a0, a1		# ... | PAGE_DIRECTORY

	/*
	 * We utilize all three wired-ways (7-9) to hold pmd translations.
	 * Memory regions are mapped to the DTLBs according to bits 28 and 29.
	 * This allows to map the three most common regions to three different
	 * DTLBs:
	 *  0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
	 *  2   -> way 8	shared libraries (2000.0000)
	 *  3   -> way 9	stack (3000.0000)
	 */

	extui	a3, a3, 28, 2		# addr. bit 28 and 29	0,1,2,3
	rsr	a1, ptevaddr
	addx2	a3, a3, a3		# ->			0,3,6,9
	srli	a1, a1, PAGE_SHIFT
	extui	a3, a3, 2, 2		# ->			0,0,1,2
	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
	addi	a3, a3, DTLB_WAY_PGD
	add	a1, a1, a3		# ... + way_number

3:	wdtlb	a0, a1
	dsync

	/* Exit critical section. */

4:	movi	a3, exc_table		# restore a3
	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a0, a2, PT_AREG0
	l32i	a1, a2, PT_AREG1
	l32i	a2, a2, PT_DEPC
	xsr	a3, excsave1

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore excsave1 and return. */

	rsr	a2, depc
	rfe

	/* Return from double exception. */

1:	xsr	a2, depc
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

2:	/* Special case for cache aliasing.
	 * We (should) only get here if a clear_user_page, copy_user_page
	 * or the aliased cache flush functions got preemptively interrupted
	 * by another task. Re-establish temporary mapping to the
	 * TLBTEMP_BASE areas.
	 */

	/* We shouldn't be in a double exception */

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f

	/* Make sure the exception originated in the special functions */

	movi	a0, __tlbtemp_mapping_start
	rsr	a3, epc1
	bltu	a3, a0, 2f
	movi	a0, __tlbtemp_mapping_end
	bgeu	a3, a0, 2f

	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */

	movi	a3, TLBTEMP_BASE_1
	rsr	a0, excvaddr
	bltu	a0, a3, 2f

	addi	a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
	bgeu	a1, a3, 2f

	/* Check if we have to restore an ITLB mapping. */

	movi	a1, __tlbtemp_mapping_itlb
	rsr	a3, epc1
	sub	a3, a3, a1

	/* Calculate VPN */

	movi	a1, PAGE_MASK
	and	a1, a1, a0

	/* Jump for ITLB entry */

	bgez	a3, 1f

	/* We can use up to two TLBTEMP areas, one for src and one for dst. */

	extui	a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
	add	a1, a3, a1

	/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */

	mov	a0, a6
	movnez	a0, a7, a3
	j	3b

	/* ITLB entry. We only use dst in a6. */

1:	witlb	a6, a1
	isync
	j	4b


#endif	// DCACHE_WAY_SIZE > PAGE_SIZE


2:	/* Invalid PGD, default exception handling */

	movi	a3, exc_table
	rsr	a1, depc
	xsr	a3, excsave1
	s32i	a1, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	j	_kernel_exception
1:	j	_user_exception

ENDPROC(fast_second_level_miss)
5a0015d6
CZ
1770
1771/*
1772 * StoreProhibitedException
1773 *
1774 * Update the pte and invalidate the itlb mapping for this pte.
1775 *
1776 * Entry condition:
1777 *
1778 * a0: trashed, original value saved on stack (PT_AREG0)
1779 * a1: a1
1780 * a2: new stack pointer, original in DEPC
1781 * a3: dispatch table
1782 * depc: a2, original value saved on stack (PT_DEPC)
1783 * excsave_1: a3
1784 *
1785 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1786 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1787 */
1788
ENTRY(fast_store_prohibited)

	/* Save a1 and a4. */

	s32i	a1, a2, PT_AREG1
	s32i	a4, a2, PT_AREG4

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a1, excvaddr		# fault address
	_PGD_OFFSET(a0, a1, a4)
	l32i	a0, a0, 0
	beqz	a0, 2f			# no PGD entry -> handle fault in C

	/*
	 * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
	 * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
	 */

	_PTE_OFFSET(a0, a1, a4)
	l32i	a4, a0, 0		# read pteval
	movi	a1, _PAGE_CA_INVALID
	ball	a4, a1, 2f
	bbci.l	a4, _PAGE_WRITABLE_BIT, 2f

	/* Mark the page accessed, dirty, and hardware-writable. */

	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
	or	a4, a4, a1
	rsr	a1, excvaddr
	s32i	a4, a0, 0

	/* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	a0, 0
#endif
	pdtlb	a0, a1
	wdtlb	a4, a0

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a4, a2, PT_AREG4
	l32i	a1, a2, PT_AREG1
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_DEPC

	/* Restore excsave1 and a3. */

	xsr	a3, excsave1
	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	rsr	a2, depc
	rfe

	/* Double exception. Restore FIXUP handler and return. */

1:	xsr	a2, depc
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

2:	/* If there was a problem, handle fault in C */

	rsr	a4, depc	# still holds a2
	xsr	a3, excsave1
	s32i	a4, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	l32i	a4, a2, PT_AREG4
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	j	_kernel_exception
1:	j	_user_exception

ENDPROC(fast_store_prohibited)
1872
e5083a63 1873#endif /* CONFIG_MMU */
5a0015d6 1874
fc4fb2ad
CZ
1875/*
1876 * System Calls.
1877 *
1878 * void system_call (struct pt_regs* regs, int exccause)
1879 * a2 a3
1880 */
1881
ENTRY(system_call)

	entry	a1, 32

	/* regs->syscall = regs->areg[2] */

	l32i	a3, a2, PT_AREG2
	mov	a6, a2
	movi	a4, do_syscall_trace_enter
	s32i	a3, a2, PT_SYSCALL
	callx4	a4

	/* syscall = sys_call_table[syscall_nr] */

	movi	a4, sys_call_table;
	movi	a5, __NR_syscall_count
	movi	a6, -ENOSYS		# default return value for bad nr
	bgeu	a3, a5, 1f

	addx4	a4, a3, a4
	l32i	a4, a4, 0
	movi	a5, sys_ni_syscall;
	beq	a4, a5, 1f		# unimplemented syscall -> -ENOSYS

	/* Load args: arg0 - arg5 are passed via regs. */

	l32i	a6, a2, PT_AREG6
	l32i	a7, a2, PT_AREG3
	l32i	a8, a2, PT_AREG4
	l32i	a9, a2, PT_AREG5
	l32i	a10, a2, PT_AREG8
	l32i	a11, a2, PT_AREG9

	/* Pass one additional argument to the syscall: pt_regs (on stack) */
	s32i	a2, a1, 0

	callx4	a4

1:	/* regs->areg[2] = return_value */

	s32i	a6, a2, PT_AREG2
	movi	a4, do_syscall_trace_leave
	mov	a6, a2
	callx4	a4
	retw

ENDPROC(system_call)
1929
fc4fb2ad 1930
5a0015d6
CZ
1931/*
1932 * Task switch.
1933 *
1934 * struct task* _switch_to (struct task* prev, struct task* next)
1935 * a2 a2 a3
1936 */
1937
ENTRY(_switch_to)

	entry	a1, 16

	mov	a12, a2			# preserve 'prev' (a2)
	mov	a13, a3			# and 'next' (a3)

	l32i	a4, a2, TASK_THREAD_INFO
	l32i	a5, a3, TASK_THREAD_INFO

	save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER

	s32i	a0, a12, THREAD_RA	# save return address
	s32i	a1, a12, THREAD_SP	# save stack pointer

	/* Disable ints while we manipulate the stack pointer. */

	movi	a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
	xsr	a14, ps			# a14 now holds the old PS
	rsr	a3, excsave1
	rsync
	s32i	a3, a3, EXC_TABLE_FIXUP	/* enter critical section */

	/* Switch CPENABLE */

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	l32i	a3, a5, THREAD_CPENABLE
	xsr	a3, cpenable
	s32i	a3, a4, THREAD_CPENABLE
#endif

	/* Flush register file. */

	call0	_spill_registers	# destroys a3, a4, and SAR

	/* Set kernel stack (and leave critical section)
	 * Note: It's safe to set it here. The stack will not be overwritten
	 *       because the kernel stack will only be loaded again after
	 *       we return from kernel space.
	 */

	rsr	a3, excsave1		# exc_table
	movi	a6, 0
	addi	a7, a5, PT_REGS_OFFSET
	s32i	a6, a3, EXC_TABLE_FIXUP
	s32i	a7, a3, EXC_TABLE_KSTK

	/* restore context of the task 'next' */

	l32i	a0, a13, THREAD_RA	# restore return address
	l32i	a1, a13, THREAD_SP	# restore stack pointer

	load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER

	wsr	a14, ps			# restore original PS (re-enable ints)
	mov	a2, a12			# return 'prev'
	rsync

	retw

ENDPROC(_switch_to)
5a0015d6
CZ
1999
ENTRY(ret_from_fork)

	/* void schedule_tail (struct task_struct *prev)
	 * Note: prev is still in a6 (return value from fake call4 frame)
	 */
	movi	a4, schedule_tail
	callx4	a4

	movi	a4, do_syscall_trace_leave
	mov	a6, a1			# pass pt_regs (on stack) to tracer
	callx4	a4

	j	common_exception_return

ENDPROC(ret_from_fork)
2015
3306a726
MF
2016/*
2017 * Kernel thread creation helper
2018 * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
2019 * left from _switch_to: a6 = prev
2020 */
ENTRY(ret_from_kernel_thread)

	/* a2 = thread_fn, a3 = thread_fn arg (set up by copy_thread);
	 * a6 = prev (left from _switch_to).
	 */

	call4	schedule_tail
	mov	a6, a3			# thread_fn arg -> first windowed arg
	callx4	a2			# call thread_fn
	j	common_exception_return

ENDPROC(ret_from_kernel_thread)
This page took 1.082759 seconds and 5 git commands to generate.