1 /*
2 * linux/arch/i386/entry.S
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7 /*
8 * entry.S contains the system-call and fault low-level handling routines.
9 * This also contains the timer-interrupt handler, as well as all interrupts
10 * and faults that can result in a task-switch.
11 *
12 * NOTE: This code handles signal-recognition, which happens every time
13 * after a timer-interrupt and after each system call.
14 *
15 * I changed all the .align's to 4 (16 byte alignment), as that's faster
16 * on a 486.
17 *
18	 * Stack layout in 'syscall_exit':
19 * ptrace needs to have all regs on the stack.
20 * if the order here is changed, it needs to be
21 * updated in fork.c:copy_process, signal.c:do_signal,
22 * ptrace.c and ptrace.h
23 *
24 * 0(%esp) - %ebx
25 * 4(%esp) - %ecx
26 * 8(%esp) - %edx
27 * C(%esp) - %esi
28 * 10(%esp) - %edi
29 * 14(%esp) - %ebp
30 * 18(%esp) - %eax
31 * 1C(%esp) - %ds
32 * 20(%esp) - %es
33 * 24(%esp) - orig_eax
34 * 28(%esp) - %eip
35 * 2C(%esp) - %cs
36 * 30(%esp) - %eflags
37 * 34(%esp) - %oldesp
38 * 38(%esp) - %oldss
39 *
40	 * The thread_info pointer is kept in register %ebp during any slow
	 * entries (see GET_THREAD_INFO); "current" is reached through it.
41 */
42
43 #include <linux/linkage.h>
44 #include <asm/thread_info.h>
45 #include <asm/irqflags.h>
46 #include <asm/errno.h>
47 #include <asm/segment.h>
48 #include <asm/smp.h>
49 #include <asm/page.h>
50 #include <asm/desc.h>
51 #include <asm/dwarf2.h>
52 #include "irq_vectors.h"
53
54 #define nr_syscalls ((syscall_table_size)/4)
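/* syscall_table_size is computed at the bottom of this file from the
   sys_call_table emitted by syscall_table.S; each entry is a 4-byte
   function pointer, hence the divide by 4. */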
55
56 EBX = 0x00
57 ECX = 0x04
58 EDX = 0x08
59 ESI = 0x0C
60 EDI = 0x10
61 EBP = 0x14
62 EAX = 0x18
63 DS = 0x1C
64 ES = 0x20
65 ORIG_EAX = 0x24
66 EIP = 0x28
67 CS = 0x2C
68 EFLAGS = 0x30
69 OLDESP = 0x34
70 OLDSS = 0x38
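/*
 * Byte offsets of the saved registers within the frame built by SAVE_ALL
 * (the stack layout listed in the header comment).  For reference, a
 * C-level sketch of the same layout -- the authoritative definition is
 * struct pt_regs in include/asm-i386/ptrace.h:
 *
 *	struct pt_regs {
 *		long ebx, ecx, edx, esi, edi, ebp, eax;
 *		int  xds, xes;
 *		long orig_eax, eip;
 *		int  xcs;
 *		long eflags, esp;
 *		int  xss;
 *	};
 */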
71
72 CF_MASK = 0x00000001
73 TF_MASK = 0x00000100
74 IF_MASK = 0x00000200
75 DF_MASK = 0x00000400
76 NT_MASK = 0x00004000
77 VM_MASK = 0x00020000
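/* EFLAGS bits tested below: carry, trap, interrupt-enable, direction,
   nested-task and VM86 mode, respectively. */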
78
79 #ifdef CONFIG_PREEMPT
80 #define preempt_stop cli; TRACE_IRQS_OFF
81 #else
82 #define preempt_stop
83 #define resume_kernel restore_nocheck
84 #endif
85
86 .macro TRACE_IRQS_IRET
87 #ifdef CONFIG_TRACE_IRQFLAGS
88 testl $IF_MASK,EFLAGS(%esp) # interrupts off?
89 jz 1f
90 TRACE_IRQS_ON
91 1:
92 #endif
93 .endm
94
95 #ifdef CONFIG_VM86
96 #define resume_userspace_sig check_userspace
97 #else
98 #define resume_userspace_sig resume_userspace
99 #endif
100
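/*
 * SAVE_ALL completes the pt_regs frame sketched in the header comment:
 * the CPU and the entry stub have already stacked ss/esp/eflags/cs/eip
 * and orig_eax; this saves the remaining segment and general-purpose
 * registers and reloads %ds/%es with __USER_DS, the flat data segment
 * the kernel uses for its own data accesses.
 */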
101 #define SAVE_ALL \
102 cld; \
103 pushl %es; \
104 CFI_ADJUST_CFA_OFFSET 4;\
105 /*CFI_REL_OFFSET es, 0;*/\
106 pushl %ds; \
107 CFI_ADJUST_CFA_OFFSET 4;\
108 /*CFI_REL_OFFSET ds, 0;*/\
109 pushl %eax; \
110 CFI_ADJUST_CFA_OFFSET 4;\
111 CFI_REL_OFFSET eax, 0;\
112 pushl %ebp; \
113 CFI_ADJUST_CFA_OFFSET 4;\
114 CFI_REL_OFFSET ebp, 0;\
115 pushl %edi; \
116 CFI_ADJUST_CFA_OFFSET 4;\
117 CFI_REL_OFFSET edi, 0;\
118 pushl %esi; \
119 CFI_ADJUST_CFA_OFFSET 4;\
120 CFI_REL_OFFSET esi, 0;\
121 pushl %edx; \
122 CFI_ADJUST_CFA_OFFSET 4;\
123 CFI_REL_OFFSET edx, 0;\
124 pushl %ecx; \
125 CFI_ADJUST_CFA_OFFSET 4;\
126 CFI_REL_OFFSET ecx, 0;\
127 pushl %ebx; \
128 CFI_ADJUST_CFA_OFFSET 4;\
129 CFI_REL_OFFSET ebx, 0;\
130 movl $(__USER_DS), %edx; \
131 movl %edx, %ds; \
132 movl %edx, %es;
133
134 #define RESTORE_INT_REGS \
135 popl %ebx; \
136 CFI_ADJUST_CFA_OFFSET -4;\
137 CFI_RESTORE ebx;\
138 popl %ecx; \
139 CFI_ADJUST_CFA_OFFSET -4;\
140 CFI_RESTORE ecx;\
141 popl %edx; \
142 CFI_ADJUST_CFA_OFFSET -4;\
143 CFI_RESTORE edx;\
144 popl %esi; \
145 CFI_ADJUST_CFA_OFFSET -4;\
146 CFI_RESTORE esi;\
147 popl %edi; \
148 CFI_ADJUST_CFA_OFFSET -4;\
149 CFI_RESTORE edi;\
150 popl %ebp; \
151 CFI_ADJUST_CFA_OFFSET -4;\
152 CFI_RESTORE ebp;\
153 popl %eax; \
154 CFI_ADJUST_CFA_OFFSET -4;\
155 CFI_RESTORE eax
156
157 #define RESTORE_REGS \
158 RESTORE_INT_REGS; \
159 1: popl %ds; \
160 CFI_ADJUST_CFA_OFFSET -4;\
161 /*CFI_RESTORE ds;*/\
162 2: popl %es; \
163 CFI_ADJUST_CFA_OFFSET -4;\
164 /*CFI_RESTORE es;*/\
165 .section .fixup,"ax"; \
166 3: movl $0,(%esp); \
167 jmp 1b; \
168 4: movl $0,(%esp); \
169 jmp 2b; \
170 .previous; \
171 .section __ex_table,"a";\
172 .align 4; \
173 .long 1b,3b; \
174 .long 2b,4b; \
175 .previous
176
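/*
 * Unwind (CFI) frame descriptions for the three kinds of kernel entry:
 * RING0_INT_FRAME for exceptions without an error code (the CPU pushed
 * eip/cs/eflags, 3 words), RING0_EC_FRAME for exceptions with a hardware
 * error code (4 words), and RING0_PTREGS_FRAME for code running on a
 * fully saved pt_regs frame.
 */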
177 #define RING0_INT_FRAME \
178 CFI_STARTPROC simple;\
179 CFI_DEF_CFA esp, 3*4;\
180 /*CFI_OFFSET cs, -2*4;*/\
181 CFI_OFFSET eip, -3*4
182
183 #define RING0_EC_FRAME \
184 CFI_STARTPROC simple;\
185 CFI_DEF_CFA esp, 4*4;\
186 /*CFI_OFFSET cs, -2*4;*/\
187 CFI_OFFSET eip, -3*4
188
189 #define RING0_PTREGS_FRAME \
190 CFI_STARTPROC simple;\
191 CFI_DEF_CFA esp, OLDESP-EBX;\
192 /*CFI_OFFSET cs, CS-OLDESP;*/\
193 CFI_OFFSET eip, EIP-OLDESP;\
194 /*CFI_OFFSET es, ES-OLDESP;*/\
195 /*CFI_OFFSET ds, DS-OLDESP;*/\
196 CFI_OFFSET eax, EAX-OLDESP;\
197 CFI_OFFSET ebp, EBP-OLDESP;\
198 CFI_OFFSET edi, EDI-OLDESP;\
199 CFI_OFFSET esi, ESI-OLDESP;\
200 CFI_OFFSET edx, EDX-OLDESP;\
201 CFI_OFFSET ecx, ECX-OLDESP;\
202 CFI_OFFSET ebx, EBX-OLDESP
203
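/*
 * A newly forked task starts here: copy_thread() sets the child's saved
 * eip to ret_from_fork.  %eax holds the previous task and is passed on
 * the stack to schedule_tail() to finish the context switch; kernel
 * eflags are then reset and the child leaves through the common syscall
 * exit path, taking its return value from the pt_regs frame that
 * copy_thread() set up.
 */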
204 ENTRY(ret_from_fork)
205 CFI_STARTPROC
206 pushl %eax
207 CFI_ADJUST_CFA_OFFSET 4
208 call schedule_tail
209 GET_THREAD_INFO(%ebp)
210 popl %eax
211 CFI_ADJUST_CFA_OFFSET -4
212 pushl $0x0202 # Reset kernel eflags
213 CFI_ADJUST_CFA_OFFSET 4
214 popfl
215 CFI_ADJUST_CFA_OFFSET -4
216 jmp syscall_exit
217 CFI_ENDPROC
218
219 /*
220 * Return to user mode is not as complex as all this looks,
221 * but we want the default path for a system call return to
222 * go as quickly as possible which is why some of this is
223	 * go as quickly as possible, which is why some of this is
224 */
225
226 # userspace resumption stub bypassing syscall exit tracing
227 ALIGN
228 RING0_PTREGS_FRAME
229 ret_from_exception:
230 preempt_stop
231 ret_from_intr:
232 GET_THREAD_INFO(%ebp)
233 check_userspace:
234 movl EFLAGS(%esp), %eax # mix EFLAGS and CS
235 movb CS(%esp), %al
236 testl $(VM_MASK | 3), %eax
237 jz resume_kernel
238 ENTRY(resume_userspace)
239 cli # make sure we don't miss an interrupt
240 # setting need_resched or sigpending
241 # between sampling and the iret
242 movl TI_flags(%ebp), %ecx
243 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
244 # int/exception return?
245 jne work_pending
246 jmp restore_all
247
248 #ifdef CONFIG_PREEMPT
249 ENTRY(resume_kernel)
250 cli
251 cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
252 jnz restore_nocheck
253 need_resched:
254 movl TI_flags(%ebp), %ecx # need_resched set ?
255 testb $_TIF_NEED_RESCHED, %cl
256 jz restore_all
257 testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ?
258 jz restore_all
259 call preempt_schedule_irq
260 jmp need_resched
261 #endif
262 CFI_ENDPROC
263
264 /* SYSENTER_RETURN points to after the "sysenter" instruction in
265	   the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */
266
267 # sysenter call handler stub
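# On entry %esp still holds the value programmed into MSR_IA32_SYSENTER_ESP
# (pointed just past this CPU's TSS when SEP is enabled, see sysenter.c); the
# real kernel stack pointer is fetched from tss->esp0 through the negative
# TSS_sysenter_esp0 offset below.  %ebp carries the user stack pointer, as
# set up by the vsyscall page.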
268 ENTRY(sysenter_entry)
269 CFI_STARTPROC simple
270 CFI_DEF_CFA esp, 0
271 CFI_REGISTER esp, ebp
272 movl TSS_sysenter_esp0(%esp),%esp
273 sysenter_past_esp:
274 /*
275	 * No need to follow this irqs on/off section: sysenter itself
276	 * disabled irqs and we re-enable them straight after entry:
277 */
278 sti
279 pushl $(__USER_DS)
280 CFI_ADJUST_CFA_OFFSET 4
281 /*CFI_REL_OFFSET ss, 0*/
282 pushl %ebp
283 CFI_ADJUST_CFA_OFFSET 4
284 CFI_REL_OFFSET esp, 0
285 pushfl
286 CFI_ADJUST_CFA_OFFSET 4
287 pushl $(__USER_CS)
288 CFI_ADJUST_CFA_OFFSET 4
289 /*CFI_REL_OFFSET cs, 0*/
290 /*
291 * Push current_thread_info()->sysenter_return to the stack.
292 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
293 * pushed above; +8 corresponds to copy_thread's esp0 setting.
294 */
295 pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
296 CFI_ADJUST_CFA_OFFSET 4
297 CFI_REL_OFFSET eip, 0
298
299 /*
300 * Load the potential sixth argument from user stack.
301 * Careful about security.
302 */
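/*
 * On the user side the vsyscall page does, roughly:
 *
 *	push %ecx; push %edx; push %ebp
 *	movl %esp, %ebp
 *	sysenter
 *
 * (a sketch -- see vsyscall-sysenter.S for the real sequence), so %ebp
 * carries the user stack pointer and the sixth argument, if any, is the
 * word at 0(%ebp).  The range check keeps the 4-byte load below
 * __PAGE_OFFSET, and the __ex_table entry makes a fault at label 1 land
 * in syscall_fault.
 */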
303 cmpl $__PAGE_OFFSET-3,%ebp
304 jae syscall_fault
305 1: movl (%ebp),%ebp
306 .section __ex_table,"a"
307 .align 4
308 .long 1b,syscall_fault
309 .previous
310
311 pushl %eax
312 CFI_ADJUST_CFA_OFFSET 4
313 SAVE_ALL
314 GET_THREAD_INFO(%ebp)
315
316 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
317 testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
318 jnz syscall_trace_entry
319 cmpl $(nr_syscalls), %eax
320 jae syscall_badsys
321 call *sys_call_table(,%eax,4)
322 movl %eax,EAX(%esp)
323 cli
324 TRACE_IRQS_OFF
325 movl TI_flags(%ebp), %ecx
326 testw $_TIF_ALLWORK_MASK, %cx
327 jne syscall_exit_work
328 /* if something modifies registers it must also disable sysexit */
329 movl EIP(%esp), %edx
330 movl OLDESP(%esp), %ecx
331 xorl %ebp,%ebp
332 TRACE_IRQS_ON
333 sti
334 sysexit
335 CFI_ENDPROC
336
337
338 # system call handler stub
339 ENTRY(system_call)
340 RING0_INT_FRAME # can't unwind into user space anyway
341 pushl %eax # save orig_eax
342 CFI_ADJUST_CFA_OFFSET 4
343 SAVE_ALL
344 GET_THREAD_INFO(%ebp)
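	# if TF was set in user mode the task single-stepped into this
	# syscall; record _TIF_SINGLESTEP so the trap is reported on exit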
345 testl $TF_MASK,EFLAGS(%esp)
346 jz no_singlestep
347 orl $_TIF_SINGLESTEP,TI_flags(%ebp)
348 no_singlestep:
349 # system call tracing in operation / emulation
350 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
351 testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
352 jnz syscall_trace_entry
353 cmpl $(nr_syscalls), %eax
354 jae syscall_badsys
355 syscall_call:
356 call *sys_call_table(,%eax,4)
357 movl %eax,EAX(%esp) # store the return value
358 syscall_exit:
359 cli # make sure we don't miss an interrupt
360 # setting need_resched or sigpending
361 # between sampling and the iret
362 TRACE_IRQS_OFF
363 movl TI_flags(%ebp), %ecx
364 testw $_TIF_ALLWORK_MASK, %cx # current->work
365 jne syscall_exit_work
366
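# restore_all: decide between the plain iret and the espfix path.  %al is
# loaded with the saved CS and %ah with the saved SS; after masking, only a
# return to ring 3 (CS RPL 3), not in vm86 mode, with an LDT-based SS
# (TI bit, the 4 << 8 below) needs the ldt_ss 16-bit stack check.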
367 restore_all:
368 movl EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
369 # Warning: OLDSS(%esp) contains the wrong/random values if we
370 # are returning to the kernel.
371 # See comments in process.c:copy_thread() for details.
372 movb OLDSS(%esp), %ah
373 movb CS(%esp), %al
374 andl $(VM_MASK | (4 << 8) | 3), %eax
375 cmpl $((4 << 8) | 3), %eax
376 CFI_REMEMBER_STATE
377 je ldt_ss # returning to user-space with LDT SS
378 restore_nocheck:
379 TRACE_IRQS_IRET
380 restore_nocheck_notrace:
381 RESTORE_REGS
382 addl $4, %esp
383 CFI_ADJUST_CFA_OFFSET -4
384 1: iret
385 .section .fixup,"ax"
386 iret_exc:
387 TRACE_IRQS_ON
388 sti
389 pushl $0 # no error code
390 pushl $do_iret_error
391 jmp error_code
392 .previous
393 .section __ex_table,"a"
394 .align 4
395 .long 1b,iret_exc
396 .previous
397
398 CFI_RESTORE_STATE
399 ldt_ss:
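	# lar sets ZF only when the saved SS selector is valid; bit 22 of the
	# access rights it returns is the D/B flag, clear for a 16-bit stack.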
400 larl OLDSS(%esp), %eax
401 jnz restore_nocheck
402 testl $0x00400000, %eax # returning to 32bit stack?
403		jnz restore_nocheck	# all right, normal return
404 /* If returning to userspace with 16bit stack,
405 * try to fix the higher word of ESP, as the CPU
406 * won't restore it.
407 * This is an "official" bug of all the x86-compatible
408 * CPUs, which we can try to work around to make
409 * dosemu and wine happy. */
410 subl $8, %esp # reserve space for switch16 pointer
411 CFI_ADJUST_CFA_OFFSET 8
412 cli
413 TRACE_IRQS_OFF
414 movl %esp, %eax
415 /* Set up the 16bit stack frame with switch32 pointer on top,
416 * and a switch16 pointer on top of the current frame. */
417 call setup_x86_bogus_stack
418 CFI_ADJUST_CFA_OFFSET -8 # frame has moved
419 TRACE_IRQS_IRET
420 RESTORE_REGS
421 lss 20+4(%esp), %esp # switch to 16bit stack
422 1: iret
423 .section __ex_table,"a"
424 .align 4
425 .long 1b,iret_exc
426 .previous
427 CFI_ENDPROC
428
429 # perform work that needs to be done immediately before resumption
430 ALIGN
431 RING0_PTREGS_FRAME # can't unwind into user space anyway
432 work_pending:
433 testb $_TIF_NEED_RESCHED, %cl
434 jz work_notifysig
435 work_resched:
436 call schedule
437 cli # make sure we don't miss an interrupt
438 # setting need_resched or sigpending
439 # between sampling and the iret
440 TRACE_IRQS_OFF
441 movl TI_flags(%ebp), %ecx
442 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
443 # than syscall tracing?
444 jz restore_all
445 testb $_TIF_NEED_RESCHED, %cl
446 jnz work_resched
447
448 work_notifysig: # deal with pending signals and
449 # notify-resume requests
450 testl $VM_MASK, EFLAGS(%esp)
451 movl %esp, %eax
452 jne work_notifysig_v86 # returning to kernel-space or
453 # vm86-space
454 xorl %edx, %edx
455 call do_notify_resume
456 jmp resume_userspace_sig
457
458 ALIGN
459 work_notifysig_v86:
460 #ifdef CONFIG_VM86
461 pushl %ecx # save ti_flags for do_notify_resume
462 CFI_ADJUST_CFA_OFFSET 4
463 call save_v86_state # %eax contains pt_regs pointer
464 popl %ecx
465 CFI_ADJUST_CFA_OFFSET -4
466 movl %eax, %esp
467 xorl %edx, %edx
468 call do_notify_resume
469 jmp resume_userspace_sig
470 #endif
471
472	# perform syscall entry tracing
473 ALIGN
474 syscall_trace_entry:
475 movl $-ENOSYS,EAX(%esp)
476 movl %esp, %eax
477 xorl %edx,%edx
478 call do_syscall_trace
479 cmpl $0, %eax
480 jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU,
481 # so must skip actual syscall
482 movl ORIG_EAX(%esp), %eax
483 cmpl $(nr_syscalls), %eax
484 jnae syscall_call
485 jmp syscall_exit
486
487 # perform syscall exit tracing
488 ALIGN
489 syscall_exit_work:
490 testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
491 jz work_pending
492 TRACE_IRQS_ON
493 sti # could let do_syscall_trace() call
494 # schedule() instead
495 movl %esp, %eax
496 movl $1, %edx
497 call do_syscall_trace
498 jmp resume_userspace
499 CFI_ENDPROC
500
501 RING0_INT_FRAME # can't unwind into user space anyway
502 syscall_fault:
503 pushl %eax # save orig_eax
504 CFI_ADJUST_CFA_OFFSET 4
505 SAVE_ALL
506 GET_THREAD_INFO(%ebp)
507 movl $-EFAULT,EAX(%esp)
508 jmp resume_userspace
509
510 syscall_badsys:
511 movl $-ENOSYS,EAX(%esp)
512 jmp resume_userspace
513 CFI_ENDPROC
514
515 #define FIXUP_ESPFIX_STACK \
516 movl %esp, %eax; \
517 /* switch to 32bit stack using the pointer on top of 16bit stack */ \
518 lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
519 /* copy data from 16bit stack to 32bit stack */ \
520 call fixup_x86_bogus_stack; \
521 /* put ESP to the proper location */ \
522 movl %eax, %esp;
523 #define UNWIND_ESPFIX_STACK \
524 pushl %eax; \
525 CFI_ADJUST_CFA_OFFSET 4; \
526 movl %ss, %eax; \
527 /* see if on 16bit stack */ \
528 cmpw $__ESPFIX_SS, %ax; \
529 je 28f; \
530 27: popl %eax; \
531 CFI_ADJUST_CFA_OFFSET -4; \
532 .section .fixup,"ax"; \
533 28: movl $__KERNEL_DS, %eax; \
534 movl %eax, %ds; \
535 movl %eax, %es; \
536 /* switch to 32bit stack */ \
537 FIXUP_ESPFIX_STACK; \
538 jmp 27b; \
539 .previous
540
541 /*
542 * Build the entry stubs and pointer table with
543 * some assembler magic.
544 */
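/*
 * Each iteration of the .rept below emits, in effect:
 *
 *	1:	pushl $~(vector)
 *		jmp common_interrupt
 *
 * and appends the stub's address to the interrupt[] array in .data.  The
 * vector is pushed one's-complemented so the value saved in orig_eax is
 * negative and cannot be mistaken for a system call number; the array is
 * used at boot to fill in the IDT entries for the external IRQs.
 */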
545 .data
546 ENTRY(interrupt)
547 .text
548
549 vector=0
550 ENTRY(irq_entries_start)
551 RING0_INT_FRAME
552 .rept NR_IRQS
553 ALIGN
554 .if vector
555 CFI_ADJUST_CFA_OFFSET -4
556 .endif
557 1: pushl $~(vector)
558 CFI_ADJUST_CFA_OFFSET 4
559 jmp common_interrupt
560 .data
561 .long 1b
562 .text
563 vector=vector+1
564 .endr
565
566 /*
567 * the CPU automatically disables interrupts when executing an IRQ vector,
568 * so IRQ-flags tracing has to follow that:
569 */
570 ALIGN
571 common_interrupt:
572 SAVE_ALL
573 TRACE_IRQS_OFF
574 movl %esp,%eax
575 call do_IRQ
576 jmp ret_from_intr
577 CFI_ENDPROC
578
579 #define BUILD_INTERRUPT(name, nr) \
580 ENTRY(name) \
581 RING0_INT_FRAME; \
582 pushl $~(nr); \
583 CFI_ADJUST_CFA_OFFSET 4; \
584 SAVE_ALL; \
585 TRACE_IRQS_OFF \
586 movl %esp,%eax; \
587 call smp_/**/name; \
588 jmp ret_from_intr; \
589 CFI_ENDPROC
590
591 /* The include is where all of the SMP etc. interrupts come from */
592 #include "entry_arch.h"
593
594 KPROBE_ENTRY(page_fault)
595 RING0_EC_FRAME
596 pushl $do_page_fault
597 CFI_ADJUST_CFA_OFFSET 4
598 ALIGN
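/*
 * Common exception body.  Each exception stub jumps here after pushing the
 * address of its C handler on top of the error code (pushed by the CPU, or
 * a 0 pushed by the stub), so those two words already occupy the ES and
 * ORIG_EAX slots of the pt_regs frame.  The pushes below fill in the rest
 * of the frame in pt_regs order; the handler address is then pulled out of
 * the ES slot (and replaced with the real %es), ORIG_EAX is set to -1, and
 * the handler is called with the pt_regs pointer in %eax and the error
 * code in %edx.
 */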
599 error_code:
600 pushl %ds
601 CFI_ADJUST_CFA_OFFSET 4
602 /*CFI_REL_OFFSET ds, 0*/
603 pushl %eax
604 CFI_ADJUST_CFA_OFFSET 4
605 CFI_REL_OFFSET eax, 0
606 xorl %eax, %eax
607 pushl %ebp
608 CFI_ADJUST_CFA_OFFSET 4
609 CFI_REL_OFFSET ebp, 0
610 pushl %edi
611 CFI_ADJUST_CFA_OFFSET 4
612 CFI_REL_OFFSET edi, 0
613 pushl %esi
614 CFI_ADJUST_CFA_OFFSET 4
615 CFI_REL_OFFSET esi, 0
616 pushl %edx
617 CFI_ADJUST_CFA_OFFSET 4
618 CFI_REL_OFFSET edx, 0
619 decl %eax # eax = -1
620 pushl %ecx
621 CFI_ADJUST_CFA_OFFSET 4
622 CFI_REL_OFFSET ecx, 0
623 pushl %ebx
624 CFI_ADJUST_CFA_OFFSET 4
625 CFI_REL_OFFSET ebx, 0
626 cld
627 pushl %es
628 CFI_ADJUST_CFA_OFFSET 4
629 /*CFI_REL_OFFSET es, 0*/
630 UNWIND_ESPFIX_STACK
631 popl %ecx
632 CFI_ADJUST_CFA_OFFSET -4
633 /*CFI_REGISTER es, ecx*/
634 movl ES(%esp), %edi # get the function address
635 movl ORIG_EAX(%esp), %edx # get the error code
636 movl %eax, ORIG_EAX(%esp)
637 movl %ecx, ES(%esp)
638 /*CFI_REL_OFFSET es, ES*/
639 movl $(__USER_DS), %ecx
640 movl %ecx, %ds
641 movl %ecx, %es
642 movl %esp,%eax # pt_regs pointer
643 call *%edi
644 jmp ret_from_exception
645 CFI_ENDPROC
646 KPROBE_END(page_fault)
647
648 ENTRY(coprocessor_error)
649 RING0_INT_FRAME
650 pushl $0
651 CFI_ADJUST_CFA_OFFSET 4
652 pushl $do_coprocessor_error
653 CFI_ADJUST_CFA_OFFSET 4
654 jmp error_code
655 CFI_ENDPROC
656
657 ENTRY(simd_coprocessor_error)
658 RING0_INT_FRAME
659 pushl $0
660 CFI_ADJUST_CFA_OFFSET 4
661 pushl $do_simd_coprocessor_error
662 CFI_ADJUST_CFA_OFFSET 4
663 jmp error_code
664 CFI_ENDPROC
665
666 ENTRY(device_not_available)
667 RING0_INT_FRAME
668 pushl $-1 # mark this as an int
669 CFI_ADJUST_CFA_OFFSET 4
670 SAVE_ALL
671 movl %cr0, %eax
672 testl $0x4, %eax # EM (math emulation bit)
673 jne device_not_available_emulate
674 preempt_stop
675 call math_state_restore
676 jmp ret_from_exception
677 device_not_available_emulate:
678 pushl $0 # temporary storage for ORIG_EIP
679 CFI_ADJUST_CFA_OFFSET 4
680 call math_emulate
681 addl $4, %esp
682 CFI_ADJUST_CFA_OFFSET -4
683 jmp ret_from_exception
684 CFI_ENDPROC
685
686 /*
687 * Debug traps and NMI can happen at the one SYSENTER instruction
688 * that sets up the real kernel stack. Check here, since we can't
689 * allow the wrong stack to be used.
690 *
691 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
692 * already pushed 3 words if it hits on the sysenter instruction:
693 * eflags, cs and eip.
694 *
695 * We just load the right stack, and push the three (known) values
696 * by hand onto the new stack - while updating the return eip past
697 * the instruction that would have done it for sysenter.
698 */
699 #define FIX_STACK(offset, ok, label) \
700 cmpw $__KERNEL_CS,4(%esp); \
701 jne ok; \
702 label: \
703 movl TSS_sysenter_esp0+offset(%esp),%esp; \
704 pushfl; \
705 pushl $__KERNEL_CS; \
706 pushl $sysenter_past_esp
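/*
 * The "offset" argument is how far %esp has already dropped below the
 * MSR_IA32_SYSENTER_ESP value: 12 when a single exception frame (eflags,
 * cs, eip) is on the stack, 24 on the NMI-during-debug path where two such
 * frames are stacked (see nmi_debug_stack_check below).
 */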
707
708 KPROBE_ENTRY(debug)
709 RING0_INT_FRAME
710 cmpl $sysenter_entry,(%esp)
711 jne debug_stack_correct
712 FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
713 debug_stack_correct:
714 pushl $-1 # mark this as an int
715 CFI_ADJUST_CFA_OFFSET 4
716 SAVE_ALL
717 xorl %edx,%edx # error code 0
718 movl %esp,%eax # pt_regs pointer
719 call do_debug
720 jmp ret_from_exception
721 CFI_ENDPROC
722 KPROBE_END(debug)
723
724 /*
725 * NMI is doubly nasty. It can happen _while_ we're handling
726 * a debug fault, and the debug fault hasn't yet been able to
727 * clear up the stack. So we first check whether we got an
728 * NMI on the sysenter entry path, but after that we need to
729 * check whether we got an NMI on the debug path where the debug
730 * fault happened on the sysenter path.
731 */
732 ENTRY(nmi)
733 RING0_INT_FRAME
734 pushl %eax
735 CFI_ADJUST_CFA_OFFSET 4
736 movl %ss, %eax
737 cmpw $__ESPFIX_SS, %ax
738 popl %eax
739 CFI_ADJUST_CFA_OFFSET -4
740 je nmi_16bit_stack
741 cmpl $sysenter_entry,(%esp)
742 je nmi_stack_fixup
743 pushl %eax
744 CFI_ADJUST_CFA_OFFSET 4
745 movl %esp,%eax
746 /* Do not access memory above the end of our stack page,
747 * it might not exist.
748 */
749 andl $(THREAD_SIZE-1),%eax
750 cmpl $(THREAD_SIZE-20),%eax
751 popl %eax
752 CFI_ADJUST_CFA_OFFSET -4
753 jae nmi_stack_correct
754 cmpl $sysenter_entry,12(%esp)
755 je nmi_debug_stack_check
756 nmi_stack_correct:
757 pushl %eax
758 CFI_ADJUST_CFA_OFFSET 4
759 SAVE_ALL
760 xorl %edx,%edx # zero error code
761 movl %esp,%eax # pt_regs pointer
762 call do_nmi
763 jmp restore_nocheck_notrace
764 CFI_ENDPROC
765
766 nmi_stack_fixup:
767 FIX_STACK(12,nmi_stack_correct, 1)
768 jmp nmi_stack_correct
769 nmi_debug_stack_check:
770 cmpw $__KERNEL_CS,16(%esp)
771 jne nmi_stack_correct
772 cmpl $debug,(%esp)
773 jb nmi_stack_correct
774 cmpl $debug_esp_fix_insn,(%esp)
775 ja nmi_stack_correct
776 FIX_STACK(24,nmi_stack_correct, 1)
777 jmp nmi_stack_correct
778
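/*
 * The NMI arrived while we were running on the 16-bit espfix stack.
 * Save an ss:esp pair that lets us lss back to it, duplicate the 12-byte
 * iret frame, switch to the 32-bit stack (FIXUP_ESPFIX_STACK) to run
 * do_nmi, and finally return through the 16-bit stack again.
 */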
779 nmi_16bit_stack:
780 RING0_INT_FRAME
781 /* create the pointer to lss back */
782 pushl %ss
783 CFI_ADJUST_CFA_OFFSET 4
784 pushl %esp
785 CFI_ADJUST_CFA_OFFSET 4
786 movzwl %sp, %esp
787 addw $4, (%esp)
788 /* copy the iret frame of 12 bytes */
789 .rept 3
790 pushl 16(%esp)
791 CFI_ADJUST_CFA_OFFSET 4
792 .endr
793 pushl %eax
794 CFI_ADJUST_CFA_OFFSET 4
795 SAVE_ALL
796 FIXUP_ESPFIX_STACK # %eax == %esp
797 CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved
798 xorl %edx,%edx # zero error code
799 call do_nmi
800 RESTORE_REGS
801 lss 12+4(%esp), %esp # back to 16bit stack
802 1: iret
803 CFI_ENDPROC
804 .section __ex_table,"a"
805 .align 4
806 .long 1b,iret_exc
807 .previous
808
809 KPROBE_ENTRY(int3)
810 RING0_INT_FRAME
811 pushl $-1 # mark this as an int
812 CFI_ADJUST_CFA_OFFSET 4
813 SAVE_ALL
814 xorl %edx,%edx # zero error code
815 movl %esp,%eax # pt_regs pointer
816 call do_int3
817 jmp ret_from_exception
818 CFI_ENDPROC
819 KPROBE_END(int3)
820
821 ENTRY(overflow)
822 RING0_INT_FRAME
823 pushl $0
824 CFI_ADJUST_CFA_OFFSET 4
825 pushl $do_overflow
826 CFI_ADJUST_CFA_OFFSET 4
827 jmp error_code
828 CFI_ENDPROC
829
830 ENTRY(bounds)
831 RING0_INT_FRAME
832 pushl $0
833 CFI_ADJUST_CFA_OFFSET 4
834 pushl $do_bounds
835 CFI_ADJUST_CFA_OFFSET 4
836 jmp error_code
837 CFI_ENDPROC
838
839 ENTRY(invalid_op)
840 RING0_INT_FRAME
841 pushl $0
842 CFI_ADJUST_CFA_OFFSET 4
843 pushl $do_invalid_op
844 CFI_ADJUST_CFA_OFFSET 4
845 jmp error_code
846 CFI_ENDPROC
847
848 ENTRY(coprocessor_segment_overrun)
849 RING0_INT_FRAME
850 pushl $0
851 CFI_ADJUST_CFA_OFFSET 4
852 pushl $do_coprocessor_segment_overrun
853 CFI_ADJUST_CFA_OFFSET 4
854 jmp error_code
855 CFI_ENDPROC
856
857 ENTRY(invalid_TSS)
858 RING0_EC_FRAME
859 pushl $do_invalid_TSS
860 CFI_ADJUST_CFA_OFFSET 4
861 jmp error_code
862 CFI_ENDPROC
863
864 ENTRY(segment_not_present)
865 RING0_EC_FRAME
866 pushl $do_segment_not_present
867 CFI_ADJUST_CFA_OFFSET 4
868 jmp error_code
869 CFI_ENDPROC
870
871 ENTRY(stack_segment)
872 RING0_EC_FRAME
873 pushl $do_stack_segment
874 CFI_ADJUST_CFA_OFFSET 4
875 jmp error_code
876 CFI_ENDPROC
877
878 KPROBE_ENTRY(general_protection)
879 RING0_EC_FRAME
880 pushl $do_general_protection
881 CFI_ADJUST_CFA_OFFSET 4
882 jmp error_code
883 CFI_ENDPROC
884 KPROBE_END(general_protection)
885
886 ENTRY(alignment_check)
887 RING0_EC_FRAME
888 pushl $do_alignment_check
889 CFI_ADJUST_CFA_OFFSET 4
890 jmp error_code
891 CFI_ENDPROC
892
893 ENTRY(divide_error)
894 RING0_INT_FRAME
895 pushl $0 # no error code
896 CFI_ADJUST_CFA_OFFSET 4
897 pushl $do_divide_error
898 CFI_ADJUST_CFA_OFFSET 4
899 jmp error_code
900 CFI_ENDPROC
901
902 #ifdef CONFIG_X86_MCE
903 ENTRY(machine_check)
904 RING0_INT_FRAME
905 pushl $0
906 CFI_ADJUST_CFA_OFFSET 4
907 pushl machine_check_vector
908 CFI_ADJUST_CFA_OFFSET 4
909 jmp error_code
910 CFI_ENDPROC
911 #endif
912
913 ENTRY(spurious_interrupt_bug)
914 RING0_INT_FRAME
915 pushl $0
916 CFI_ADJUST_CFA_OFFSET 4
917 pushl $do_spurious_interrupt_bug
918 CFI_ADJUST_CFA_OFFSET 4
919 jmp error_code
920 CFI_ENDPROC
921
922 #ifdef CONFIG_STACK_UNWIND
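/*
 * A rough C-level sketch of the contract (cf. include/asm-i386/unwind.h):
 *
 *	asmlinkage int arch_unwind_init_running(struct unwind_frame_info *info,
 *			asmlinkage int (*callback)(struct unwind_frame_info *,
 *						   void *arg),
 *			void *arg);
 *
 * The register area at the start of *info is filled with this function's
 * caller state (EIP = our return address, OLDESP = the caller's stack
 * pointer), the caller-clobbered slots are zeroed, and control tail-jumps
 * to callback(info, arg).
 */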
923 ENTRY(arch_unwind_init_running)
924 CFI_STARTPROC
925 movl 4(%esp), %edx
926 movl (%esp), %ecx
927 leal 4(%esp), %eax
928 movl %ebx, EBX(%edx)
929 xorl %ebx, %ebx
930 movl %ebx, ECX(%edx)
931 movl %ebx, EDX(%edx)
932 movl %esi, ESI(%edx)
933 movl %edi, EDI(%edx)
934 movl %ebp, EBP(%edx)
935 movl %ebx, EAX(%edx)
936 movl $__USER_DS, DS(%edx)
937 movl $__USER_DS, ES(%edx)
938 movl %ebx, ORIG_EAX(%edx)
939 movl %ecx, EIP(%edx)
940 movl 12(%esp), %ecx
941 movl $__KERNEL_CS, CS(%edx)
942 movl %ebx, EFLAGS(%edx)
943 movl %eax, OLDESP(%edx)
944 movl 8(%esp), %eax
945 movl %ecx, 8(%esp)
946 movl EBX(%edx), %ebx
947 movl $__KERNEL_DS, OLDSS(%edx)
948 jmpl *%eax
949 CFI_ENDPROC
950 ENDPROC(arch_unwind_init_running)
951 #endif
952
953 .section .rodata,"a"
954 #include "syscall_table.S"
955
956 syscall_table_size=(.-sys_call_table)