/*
 *
 * linux/arch/h8300/kernel/entry.S
 *
 * Yoshinori Sato <ysato@users.sourceforge.jp>
 * David McCullough <davidm@snapgear.com>
 *
 */

/*
 * entry.S
 * contains the exception/interrupt gateways
 * and the system call entry
 */
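
/*
 * Frame layout note (added for clarity): the L* offsets used below
 * (LER0..LER3, LORIG, LCCR, LRET, LEXR, LSP, LVEC) index fields of
 * the saved register frame and are provided via <asm/asm-offsets.h>,
 * which is generated from the kernel's structure layouts at build
 * time.
 */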

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/errno.h>

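/*
 * Per-CPU helpers (note): the H8/300H core shifts only one bit at a
 * time and has no EXR register, so SHLL2/SHLR2 are two single-bit
 * shifts and SAVEEXR/RESTOREEXR are empty; the H8S core has two-bit
 * shifts, stm/ldm block transfers and a real EXR to save and restore.
 */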
#if defined(CONFIG_CPU_H8300H)
#define USERRET 8
INTERRUPTS = 64
	.h8300h
	.macro SHLL2 reg
	shll.l	\reg
	shll.l	\reg
	.endm
	.macro SHLR2 reg
	shlr.l	\reg
	shlr.l	\reg
	.endm
	.macro SAVEREGS
	mov.l	er0,@-sp
	mov.l	er1,@-sp
	mov.l	er2,@-sp
	mov.l	er3,@-sp
	.endm
	/* er0 and er1 are popped separately in RESTORE_ALL */
	.macro RESTOREREGS
	mov.l	@sp+,er3
	mov.l	@sp+,er2
	.endm
	/* H8/300H has no EXR register */
	.macro SAVEEXR
	.endm
	.macro RESTOREEXR
	.endm
#endif
#if defined(CONFIG_CPU_H8S)
#define USERRET 10
#define USEREXR 8
INTERRUPTS = 128
	.h8300s
	.macro SHLL2 reg
	shll.l	#2,\reg
	.endm
	.macro SHLR2 reg
	shlr.l	#2,\reg
	.endm
	.macro SAVEREGS
	stm.l	er0-er3,@-sp
	.endm
	.macro RESTOREREGS
	ldm.l	@sp+,er2-er3
	.endm
	.macro SAVEEXR
	mov.w	@(USEREXR:16,er0),r1
	mov.w	r1,@(LEXR-LER3:16,sp)	/* copy EXR */
	.endm
	.macro RESTOREEXR
	mov.w	@(LEXR-LER1:16,sp),r1	/* restore EXR */
	mov.b	r1l,r1h
	mov.w	r1,@(USEREXR:16,er0)
	.endm
#endif


/* CPU context save/restore macros. */

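/*
 * SAVE_ALL (overview): build a struct pt_regs frame on the kernel
 * stack.  Coming from user mode it switches from the user stack
 * (kept in _sw_usp) to the kernel stack (_sw_ksp) and copies the
 * hardware trap frame across; coming from kernel mode it just
 * extends the current frame.  On exit r1 holds the saved CCR.
 */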
	.macro SAVE_ALL
	mov.l	er0,@-sp
	stc	ccr,r0l			/* check kernel mode */
	btst	#4,r0l
	bne	5f

	/* user mode */
	mov.l	sp,@_sw_usp
	mov.l	@sp,er0			/* restore saved er0 */
	orc	#0x10,ccr		/* switch kernel stack */
	mov.l	@_sw_ksp,sp
	sub.l	#(LRET-LORIG),sp	/* allocate LORIG - LRET */
	SAVEREGS
	mov.l	@_sw_usp,er0
	mov.l	@(USERRET:16,er0),er1	/* copy the RET addr */
	mov.l	er1,@(LRET-LER3:16,sp)
	SAVEEXR

	mov.l	@(LORIG-LER3:16,sp),er0
	mov.l	er0,@(LER0-LER3:16,sp)	/* copy ER0 */
	mov.w	e1,r1			/* e1 highbyte = ccr */
	and	#0xef,r1h		/* mask mode? flag */
	bra	6f
5:
	/* kernel mode */
	mov.l	@sp,er0			/* restore saved er0 */
	subs	#2,sp			/* set dummy ccr */
	subs	#4,sp			/* set dummy sp */
	SAVEREGS
	mov.w	@(LRET-LER3:16,sp),r1	/* copy old ccr */
6:
	mov.b	r1h,r1l
	mov.b	#0,r1h
	mov.w	r1,@(LCCR-LER3:16,sp)	/* set ccr */
	mov.l	@_sw_usp,er2
	mov.l	er2,@(LSP-LER3:16,sp)	/* set usp */
	mov.l	er6,@-sp		/* syscall arg #6 */
	mov.l	er5,@-sp		/* syscall arg #5 */
	mov.l	er4,@-sp		/* syscall arg #4 */
	.endm				/* r1 = ccr */

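/*
 * RESTORE_ALL (overview): undo SAVE_ALL and return with rte.  The
 * user-mode path rebuilds the CCR/PC trap frame on the user stack,
 * records the kernel stack pointer in _sw_ksp and clears the
 * kernel-mode flag in CCR; the kernel-mode path simply unwinds.
 */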
	.macro RESTORE_ALL
	mov.l	@sp+,er4
	mov.l	@sp+,er5
	mov.l	@sp+,er6
	RESTOREREGS
	mov.w	@(LCCR-LER1:16,sp),r0	/* check kernel mode */
	btst	#4,r0l
	bne	7f

	orc	#0xc0,ccr
	mov.l	@(LSP-LER1:16,sp),er0
	mov.l	@(LER0-LER1:16,sp),er1	/* restore ER0 */
	mov.l	er1,@er0
	RESTOREEXR
	mov.w	@(LCCR-LER1:16,sp),r1	/* restore the RET addr */
	mov.b	r1l,r1h
	mov.b	@(LRET+1-LER1:16,sp),r1l
	mov.w	r1,e1
	mov.w	@(LRET+2-LER1:16,sp),r1
	mov.l	er1,@(USERRET:16,er0)

	mov.l	@sp+,er1
	add.l	#(LRET-LER1),sp		/* remove LORIG - LRET */
	mov.l	sp,@_sw_ksp
	andc	#0xef,ccr		/* switch to user mode */
	mov.l	er0,sp
	bra	8f
7:
	mov.l	@sp+,er1
	add.l	#10,sp
8:
	mov.l	@sp+,er0
	adds	#4,sp			/* remove the sw created LVEC */
	rte
	.endm

	.globl _system_call
	.globl ret_from_exception
	.globl ret_from_fork
	.globl ret_from_kernel_thread
	.globl ret_from_interrupt
	.globl _interrupt_redirect_table
	.globl _sw_ksp,_sw_usp
	.globl _resume
	.globl _interrupt_entry
	.globl _trace_break
	.globl _nmi

#if defined(CONFIG_ROMKERNEL)
	.section .int_redirect,"ax"
_interrupt_redirect_table:
#if defined(CONFIG_CPU_H8300H)
	.rept	7
	.long	0
	.endr
#endif
#if defined(CONFIG_CPU_H8S)
	.rept	5
	.long	0
	.endr
	jmp	@_trace_break
	.long	0
#endif

	jsr	@_interrupt_entry	/* NMI */
	jmp	@_system_call		/* TRAPA #0 (System call) */
	.long	0
	.long	0
	jmp	@_trace_break		/* TRAPA #3 (breakpoint) */
	.rept	INTERRUPTS-12
	jsr	@_interrupt_entry
	.endr
#endif
#if defined(CONFIG_RAMKERNEL)
	.globl _interrupt_redirect_table
	.section .bss
_interrupt_redirect_table:
	.space	4
#endif

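/*
 * Interrupt gateway (note): every redirect-table entry is a 4-byte
 * "jsr @_interrupt_entry", so the return address it pushes (LVEC)
 * identifies the vector.  The code below recovers the IRQ number as
 * (LVEC - table base) / 4 - 1 before calling do_IRQ.
 */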
	.section .text
	.align	2
_interrupt_entry:
	SAVE_ALL
/* r1l is saved ccr */
	mov.l	sp,er0
	add.l	#LVEC,er0
	btst	#4,r1l
	bne	1f
	/* user LVEC */
	mov.l	@_sw_usp,er0
	adds	#4,er0
1:
	mov.l	@er0,er0	/* LVEC address */
#if defined(CONFIG_ROMKERNEL)
	sub.l	#_interrupt_redirect_table,er0
#endif
#if defined(CONFIG_RAMKERNEL)
	mov.l	@_interrupt_redirect_table,er1
	sub.l	er1,er0
#endif
	SHLR2	er0
	dec.l	#1,er0
	mov.l	sp,er1
	subs	#4,er1		/* adjust ret_pc */
#if defined(CONFIG_CPU_H8S)
	orc	#7,exr
#endif
	jsr	@do_IRQ
	jmp	@ret_from_interrupt

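/*
 * System call entry (note): reached via TRAPA #0 with the syscall
 * number in er0 and the first arguments in er1-er3 (er4-er6 are
 * saved as args 4-6 by SAVE_ALL).  The trace hooks run on entry and
 * exit when _TIF_WORK_SYSCALL_MASK bits are set in the thread flags.
 */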
_system_call:
	subs	#4,sp		/* dummy LVEC */
	SAVE_ALL
	/* er0: syscall nr */
	andc	#0xbf,ccr
	mov.l	er0,er4

	/* save top of frame */
	mov.l	sp,er0
	jsr	@set_esp0
	mov.l	sp,er2
	and.w	#0xe000,r2	/* er2 <- current thread info */
	mov.l	@(TI_FLAGS:16,er2),er2
	and.w	#_TIF_WORK_SYSCALL_MASK,r2
	beq	1f
	mov.l	sp,er0
	jsr	@do_syscall_trace_enter
1:
	cmp.l	#__NR_syscalls,er4
	bcc	badsys
	SHLL2	er4
	mov.l	#_sys_call_table,er0
	add.l	er4,er0
	mov.l	@er0,er4
	beq	ret_from_exception:16
	mov.l	@(LER1:16,sp),er0
	mov.l	@(LER2:16,sp),er1
	mov.l	@(LER3:16,sp),er2
	jsr	@er4
	mov.l	er0,@(LER0:16,sp)	/* save the return value */
	mov.l	sp,er2
	and.w	#0xe000,r2	/* er2 <- current thread info */
	mov.l	@(TI_FLAGS:16,er2),er2
	and.w	#_TIF_WORK_SYSCALL_MASK,r2
	beq	2f
	mov.l	sp,er0
	jsr	@do_syscall_trace_leave
2:
	orc	#0xc0,ccr
	bra	resume_userspace

badsys:
	mov.l	#-ENOSYS,er0
	mov.l	er0,@(LER0:16,sp)
	bra	resume_userspace

#if !defined(CONFIG_PREEMPT)
#define resume_kernel restore_all
#endif

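/*
 * Common return paths (note): the saved CCR selects the kernel or
 * user return path; the user path loops through work_pending until
 * no _TIF_WORK_MASK bits remain (rescheduling, signal delivery).
 */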
ret_from_exception:
#if defined(CONFIG_PREEMPT)
	orc	#0xc0,ccr
#endif
ret_from_interrupt:
	mov.b	@(LCCR+1:16,sp),r0l
	btst	#4,r0l
	bne	resume_kernel:16	/* return from kernel */
resume_userspace:
	andc	#0xbf,ccr
	mov.l	sp,er4
	and.w	#0xe000,r4	/* er4 <- current thread info */
	mov.l	@(TI_FLAGS:16,er4),er1
	and.l	#_TIF_WORK_MASK,er1
	beq	restore_all:8
work_pending:
	btst	#TIF_NEED_RESCHED,r1l
	bne	work_resched:8
	/* work notifysig */
	mov.l	sp,er0
	subs	#4,er0		/* er0: pt_regs */
	jsr	@do_notify_resume
	bra	resume_userspace:8
work_resched:
	mov.l	sp,er0
	jsr	@set_esp0
	jsr	@schedule
	bra	resume_userspace:8
restore_all:
	RESTORE_ALL		/* Does RTE */

#if defined(CONFIG_PREEMPT)
resume_kernel:
	mov.l	@(TI_PRE_COUNT:16,er4),er0
	bne	restore_all:8
need_resched:
	mov.l	@(TI_FLAGS:16,er4),er0
	btst	#TIF_NEED_RESCHED,r0l
	beq	restore_all:8
	mov.b	@(LCCR+1:16,sp),r0l	/* Interrupt Enabled? */
	bmi	restore_all:8
	mov.l	sp,er0
	jsr	@set_esp0
	jsr	@preempt_schedule_irq
	bra	need_resched:8
#endif

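/*
 * Fork returns (note): both paths call schedule_tail() first; a new
 * kernel thread then jumps to the thread function saved in the LER5
 * slot with its argument taken from LER4 before joining the normal
 * exception return path.
 */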
ret_from_fork:
	mov.l	er2,er0
	jsr	@schedule_tail
	jmp	@ret_from_exception

ret_from_kernel_thread:
	mov.l	er2,er0
	jsr	@schedule_tail
	mov.l	@(LER4:16,sp),er0
	mov.l	@(LER5:16,sp),er1
	jsr	@er1
	jmp	@ret_from_exception

_resume:
	/*
	 * Beware - when entering resume, prev (the current task's
	 * thread_struct) is in er0 and next (the new task's
	 * thread_struct) is in er1, so don't change these registers
	 * until their contents are no longer needed.
	 */

	/* save sr */
	sub.w	r3,r3
	stc	ccr,r3l
	mov.w	r3,@(THREAD_CCR+2:16,er0)

	/* disable interrupts */
	orc	#0xc0,ccr
	mov.l	@_sw_usp,er3
	mov.l	er3,@(THREAD_USP:16,er0)
	mov.l	sp,@(THREAD_KSP:16,er0)

	/* Skip address space switching if they are the same. */
	/* FIXME: what did we hack out of here, this does nothing! */

	mov.l	@(THREAD_USP:16,er1),er0
	mov.l	er0,@_sw_usp
	mov.l	@(THREAD_KSP:16,er1),sp

	/* restore status register */
	mov.w	@(THREAD_CCR+2:16,er1),r3

	ldc	r3l,ccr
	rts

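/*
 * Trace/breakpoint trap (note): entered via TRAPA #3.  The handler
 * checks whether the word before the saved user PC is 0x5730 (the
 * TRAPA #3 opcode) and, if it is not, backs the PC up by 2 before
 * handing off to trace_trap.
 */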
_trace_break:
	subs	#4,sp
	SAVE_ALL
	sub.l	er1,er1
	dec.l	#1,er1
	mov.l	er1,@(LORIG,sp)
	mov.l	sp,er0
	jsr	@set_esp0
	mov.l	@_sw_usp,er0
	mov.l	@er0,er1
	mov.w	@(-2:16,er1),r2
	cmp.w	#0x5730,r2
	beq	1f
	subs	#2,er1
	mov.l	er1,@er0
1:
	and.w	#0xff,e1
	mov.l	er1,er0
	jsr	@trace_trap
	jmp	@ret_from_exception

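/*
 * NMI (note): fake an LVEC slot pointing at entry 8 of the redirect
 * table and fall into the common interrupt path, so do_IRQ receives
 * the NMI as IRQ (8*4)/4 - 1 = 7.
 */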
_nmi:
	subs	#4,sp
	mov.l	er0,@-sp
	mov.l	@_interrupt_redirect_table,er0
	add.l	#8*4,er0
	mov.l	er0,@(4,sp)
	mov.l	@sp+,er0
	jmp	@_interrupt_entry

	.section .bss
_sw_ksp:
	.space	4
_sw_usp:
	.space	4

	.end