/*
 *  linux/arch/i386/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'ret_from_system_call':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - orig_eax
 *	28(%esp) - %eip
 *	2C(%esp) - %cs
 *	30(%esp) - %eflags
 *	34(%esp) - %oldesp
 *	38(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
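/*
 * For orientation: the EBX..OLDSS offsets defined below mirror the i386
 * struct pt_regs.  A minimal sketch of that layout in C, assuming the
 * 2.6-era field names from include/asm-i386/ptrace.h:
 *
 *	struct pt_regs {
 *		long ebx;	// 0x00
 *		long ecx;	// 0x04
 *		long edx;	// 0x08
 *		long esi;	// 0x0C
 *		long edi;	// 0x10
 *		long ebp;	// 0x14
 *		long eax;	// 0x18
 *		int  xds;	// 0x1C
 *		int  xes;	// 0x20
 *		long orig_eax;	// 0x24
 *		long eip;	// 0x28
 *		int  xcs;	// 0x2C
 *		long eflags;	// 0x30
 *		long esp;	// 0x34
 *		int  xss;	// 0x38
 *	};
 *
 * The last five words (eip..xss) are pushed by the CPU on a trap from user
 * mode, orig_eax by the entry stub, and everything below that by SAVE_ALL.
 */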

#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include "irq_vectors.h"

#define nr_syscalls ((syscall_table_size)/4)
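/*
 * syscall_table_size is computed at the very end of this file as
 * (. - sys_call_table); each entry of sys_call_table is a 4-byte function
 * pointer, so dividing by 4 yields the number of system calls.
 */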

EBX		= 0x00
ECX		= 0x04
EDX		= 0x08
ESI		= 0x0C
EDI		= 0x10
EBP		= 0x14
EAX		= 0x18
DS		= 0x1C
ES		= 0x20
ORIG_EAX	= 0x24
EIP		= 0x28
CS		= 0x2C
EFLAGS		= 0x30
OLDESP		= 0x34
OLDSS		= 0x38

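/*
 * EFLAGS bits the entry code below tests for: CF is bit 0, TF bit 8,
 * IF bit 9, DF bit 10, NT bit 14 and VM bit 17.
 */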
CF_MASK		= 0x00000001
TF_MASK		= 0x00000100
IF_MASK		= 0x00000200
DF_MASK		= 0x00000400
NT_MASK		= 0x00004000
VM_MASK		= 0x00020000

#ifdef CONFIG_PREEMPT
#define preempt_stop		cli
#else
#define preempt_stop
#define resume_kernel		restore_nocheck
#endif

#define SAVE_ALL \
	cld; \
	pushl %es; \
	pushl %ds; \
	pushl %eax; \
	pushl %ebp; \
	pushl %edi; \
	pushl %esi; \
	pushl %edx; \
	pushl %ecx; \
	pushl %ebx; \
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es;
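/*
 * SAVE_ALL completes the pt_regs frame sketched above: the CPU has already
 * pushed ss/esp/eflags/cs/eip (for entries from user mode) and the stub has
 * pushed orig_eax, so pushing %es down through %ebx lands every register at
 * the EBX..ES offsets defined earlier.  %ds and %es are then reloaded with
 * __USER_DS, the flat data segment, so the kernel runs with known segments
 * no matter what user space had loaded.
 */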

#define RESTORE_INT_REGS \
	popl %ebx;	\
	popl %ecx;	\
	popl %edx;	\
	popl %esi;	\
	popl %edi;	\
	popl %ebp;	\
	popl %eax

#define RESTORE_REGS	\
	RESTORE_INT_REGS; \
1:	popl %ds;	\
2:	popl %es;	\
.section .fixup,"ax";	\
3:	movl $0,(%esp);	\
	jmp 1b;		\
4:	movl $0,(%esp);	\
	jmp 2b;		\
.previous;		\
.section __ex_table,"a";\
	.align 4;	\
	.long 1b,3b;	\
	.long 2b,4b;	\
.previous
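/*
 * The .fixup/__ex_table pairs above use the kernel's exception-table
 * mechanism: if popping %ds or %es faults (user space left a bogus selector
 * there), the fault handler looks the faulting eip up in __ex_table and
 * resumes at the fixup, which replaces the saved selector with 0 and
 * retries the pop.  A sketch of a table entry, matching the 2.6-era
 * include/asm-i386/uaccess.h definition:
 *
 *	struct exception_table_entry {
 *		unsigned long insn;	// address of the faulting instruction
 *		unsigned long fixup;	// address to resume at instead
 *	};
 */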


ENTRY(ret_from_fork)
	pushl %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	jmp syscall_exit
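/*
 * On this path %eax carries the previous task handed over by the context
 * switch; it is pushed before the call (serving as the stack argument to
 * schedule_tail() under asmlinkage conventions) and popped afterwards.
 * The child's user-visible return value of fork() is not set here at all:
 * the fork path already stored 0 in the eax slot of the child's pt_regs.
 */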

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop
ret_from_intr:
	GET_THREAD_INFO(%ebp)
	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb CS(%esp), %al
	testl $(VM_MASK | 3), %eax
	jz resume_kernel
ENTRY(resume_userspace)
	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	cli
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
#endif
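/*
 * Kernel preemption thus happens only when the interrupted kernel code held
 * no preempt_count and had interrupts enabled; preempt_schedule_irq() is
 * the variant of preempt_schedule() meant to be called with interrupts
 * disabled from interrupt-return paths, and we loop in case further
 * reschedules become pending while it ran.
 */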

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol. */

	# sysenter call handler stub
ENTRY(sysenter_entry)
	movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
	sti
	pushl $(__USER_DS)
	pushl %ebp
	pushfl
	pushl $(__USER_CS)
	pushl $SYSENTER_RETURN

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
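/*
 * The bound check allows %ebp up to __PAGE_OFFSET-4, so the 4-byte read at
 * 1: ends on the last user-space byte; anything at or above
 * __PAGE_OFFSET-3 would spill into kernel addresses and is rejected.  The
 * __ex_table entry covers the case where the page is simply unmapped.
 */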

	pushl %eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)
	cli
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
	movl EIP(%esp), %edx
	movl OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	sti
	sysexit
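/*
 * SYSEXIT's register contract: it reloads the user eip from %edx and the
 * user esp from %ecx (cs/ss are derived from the SYSENTER_CS MSR), which
 * is why those two are loaded from the frame just above.  %ebp is cleared
 * rather than restored because it was clobbered by the sixth-argument
 * load; the user-side sysenter stub is expected to save and restore the
 * real %ebp around the call.  sysexit does not restore eflags, hence the
 * explicit sti one instruction earlier.
 */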


	# system call handler stub
ENTRY(system_call)
	pushl %eax			# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)		# store the return value
syscall_exit:
	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

restore_all:
	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	# Warning: OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb OLDSS(%esp), %ah
	movb CS(%esp), %al
	andl $(VM_MASK | (4 << 8) | 3), %eax
	cmpl $((4 << 8) | 3), %eax
	je ldt_ss			# returning to user-space with LDT SS
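/*
 * After the two movb's, %eax holds the saved eflags with %ah = low byte of
 * the saved SS and %al = low byte of the saved CS.  The mask keeps VM_MASK
 * (vm86 mode), bit 2 of SS shifted into place (the TI bit: the selector
 * refers to the LDT) and the RPL bits of CS.  Only the exact combination
 * "not vm86, LDT stack segment, CPL 3" takes the slow ldt_ss path.
 */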
restore_nocheck:
	RESTORE_REGS
	addl $4, %esp
1:	iret
.section .fixup,"ax"
iret_exc:
	sti
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

ldt_ss:
	larl OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return
	/* If returning to userspace with 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
	subl $8, %esp			# reserve space for switch16 pointer
	cli
	movl %esp, %eax
	/* Set up the 16bit stack frame with switch32 pointer on top,
	 * and a switch16 pointer on top of the current frame. */
	call setup_x86_bogus_stack
	RESTORE_REGS
	lss 20+4(%esp), %esp		# switch to 16bit stack
1:	iret
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

	# perform work that needs to be done immediately before resumption
	ALIGN
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	testl $VM_MASK, EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp restore_all

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	movl %eax, %esp
	xorl %edx, %edx
	call do_notify_resume
	jmp restore_all

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,EAX(%esp)
	movl %esp, %eax
	xorl %edx,%edx
	call do_syscall_trace
	movl ORIG_EAX(%esp), %eax
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
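/*
 * -ENOSYS is stored up front so that if the tracer (e.g. ptrace) cancels
 * the call by replacing the syscall number with an out-of-range value, the
 * traced task still sees a sane error as its return value.  After tracing,
 * the possibly-rewritten number is reloaded from ORIG_EAX and revalidated
 * before dispatch.
 */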

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending
	sti				# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx
	call do_syscall_trace
	jmp resume_userspace
358 | ALIGN | |
359 | syscall_fault: | |
360 | pushl %eax # save orig_eax | |
361 | SAVE_ALL | |
362 | GET_THREAD_INFO(%ebp) | |
363 | movl $-EFAULT,EAX(%esp) | |
364 | jmp resume_userspace | |
365 | ||
366 | ALIGN | |
367 | syscall_badsys: | |
368 | movl $-ENOSYS,EAX(%esp) | |
369 | jmp resume_userspace | |
370 | ||

#define FIXUP_ESPFIX_STACK \
	movl %esp, %eax; \
	/* switch to 32bit stack using the pointer on top of 16bit stack */ \
	lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
	/* copy data from 16bit stack to 32bit stack */ \
	call fixup_x86_bogus_stack; \
	/* put ESP to the proper location */ \
	movl %eax, %esp;
#define UNWIND_ESPFIX_STACK \
	pushl %eax; \
	movl %ss, %eax; \
	/* see if on 16bit stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	jne 28f; \
	movl $__KERNEL_DS, %edx; \
	movl %edx, %ds; \
	movl %edx, %es; \
	/* switch to 32bit stack */ \
	FIXUP_ESPFIX_STACK \
28:	popl %eax;
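/*
 * Roughly how the espfix machinery hangs together: each CPU owns a small
 * dedicated stack (CPU_16BIT_STACK_SIZE bytes) addressed through the
 * 16-bit __ESPFIX_SS segment, and a far pointer back to the real 32-bit
 * stack is kept 8 bytes below its top - that is the
 * %ss:CPU_16BIT_STACK_SIZE-8 operand of the lss above.  Any entry path
 * that may run on this stack (NMI, exceptions via error_code) uses
 * UNWIND_ESPFIX_STACK to detect the situation by comparing %ss against
 * __ESPFIX_SS and to hop back.
 */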

/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)
.text

vector=0
ENTRY(irq_entries_start)
.rept NR_IRQS
	ALIGN
1:	pushl $vector-256
	jmp common_interrupt
.data
	.long 1b
.text
vector=vector+1
.endr
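/*
 * Each pass through the .rept emits one stub in .text and appends that
 * stub's address to the "interrupt" array in .data, so interrupt[n] points
 * at the stub for vector n.  The stub pushes vector-256 rather than the
 * raw vector: the resulting orig_eax is negative, which is how later code
 * (ptrace, signal delivery) tells a hardware interrupt frame apart from a
 * system call, whose orig_eax is a non-negative syscall number.
 */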

	ALIGN
common_interrupt:
	SAVE_ALL
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr

#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	pushl $nr-256;			\
	SAVE_ALL			\
	movl %esp,%eax;			\
	call smp_/**/name;		\
	jmp ret_from_intr;
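# smp_/**/name is old-style token pasting: with a traditional (K&R) cpp the
# empty comment disappears between the two tokens, so BUILD_INTERRUPT(foo, n)
# expands to a call to smp_foo.  The modern spelling would be smp_##name.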

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"

ENTRY(divide_error)
	pushl $0			# no error code
	pushl $do_divide_error
	ALIGN
error_code:
	pushl %ds
	pushl %eax
	xorl %eax, %eax
	pushl %ebp
	pushl %edi
	pushl %esi
	pushl %edx
	decl %eax			# eax = -1
	pushl %ecx
	pushl %ebx
	cld
	pushl %es
	UNWIND_ESPFIX_STACK
	popl %ecx
	movl ES(%esp), %edi		# get the function address
	movl ORIG_EAX(%esp), %edx	# get the error code
	movl %eax, ORIG_EAX(%esp)
	movl %ecx, ES(%esp)
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
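/*
 * What the shuffle above achieves: on entry the stub has already pushed an
 * error code (or 0) and the handler's address, which sit exactly where
 * ORIG_EAX and ES belong in the pt_regs layout.  The pushes then fill in
 * the remaining slots, the handler address is fetched into %edi and the
 * error code into %edx, -1 is written to ORIG_EAX (marking "not a
 * syscall"), and the %es value popped into %ecx is stored into its proper
 * slot.  The result is a frame identical to SAVE_ALL's, and the handler is
 * called with %eax (pt_regs pointer) and %edx (error code) as its first
 * two arguments under the kernel's regparm convention.
 */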

ENTRY(coprocessor_error)
	pushl $0
	pushl $do_coprocessor_error
	jmp error_code

ENTRY(simd_coprocessor_error)
	pushl $0
	pushl $do_simd_coprocessor_error
	jmp error_code

ENTRY(device_not_available)
	pushl $-1			# mark this as an int
	SAVE_ALL
	movl %cr0, %eax
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	preempt_stop
	call math_state_restore
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	call math_emulate
	addl $4, %esp
	jmp ret_from_exception
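/*
 * Device-not-available (#NM) fires on the first FPU instruction after a
 * context switch when CR0.TS is set.  If CR0.EM (bit 2) is also set there
 * is no usable FPU and the instruction goes to the in-kernel emulator;
 * otherwise this is just lazy FPU switching, and math_state_restore()
 * reloads the task's FPU state and clears TS so further FPU use is free.
 */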

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label)		\
	cmpw $__KERNEL_CS,4(%esp);		\
	jne ok;					\
label:						\
	movl TSS_sysenter_esp0+offset(%esp),%esp; \
	pushfl;					\
	pushl $__KERNEL_CS;			\
	pushl $sysenter_past_esp

ENTRY(debug)
	cmpl $sysenter_entry,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
	pushl $-1			# mark this as an int
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	pushl %eax
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	je nmi_16bit_stack
	cmpl $sysenter_entry,(%esp)
	je nmi_stack_fixup
	pushl %eax
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	jae nmi_stack_correct
	cmpl $sysenter_entry,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	pushl %eax
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_all

nmi_stack_fixup:
	FIX_STACK(12,nmi_stack_correct, 1)
	jmp nmi_stack_correct
nmi_debug_stack_check:
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)		# fall through to the fixup only if
	jb nmi_stack_correct		# the return eip lies within
	cmpl $debug_esp_fix_insn,(%esp)	# [debug, debug_esp_fix_insn]
	ja nmi_stack_correct
nmi_debug_stack_fixup:
	FIX_STACK(24,nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_16bit_stack:
	/* create the pointer to lss back */
	pushl %ss
	pushl %esp
	movzwl %sp, %esp
	addw $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	.endr
	pushl %eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to 16bit stack
1:	iret
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
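/*
 * Here the NMI arrived while already on the 16-bit espfix stack, so the
 * high word of %esp is unreliable: movzwl %sp, %esp rebuilds a flat offset
 * from the low word alone, and the just-pushed %ss:%esp pair (the saved
 * esp adjusted by 4 so it points above the pair) becomes the far pointer
 * that the final lss uses - 12+4 skips orig_eax plus the copied 12-byte
 * iret frame - to get back onto the 16-bit stack before the iret.
 */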

ENTRY(int3)
	pushl $-1			# mark this as an int
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception

ENTRY(overflow)
	pushl $0
	pushl $do_overflow
	jmp error_code

ENTRY(bounds)
	pushl $0
	pushl $do_bounds
	jmp error_code

ENTRY(invalid_op)
	pushl $0
	pushl $do_invalid_op
	jmp error_code

ENTRY(coprocessor_segment_overrun)
	pushl $0
	pushl $do_coprocessor_segment_overrun
	jmp error_code

ENTRY(invalid_TSS)
	pushl $do_invalid_TSS
	jmp error_code

ENTRY(segment_not_present)
	pushl $do_segment_not_present
	jmp error_code

ENTRY(stack_segment)
	pushl $do_stack_segment
	jmp error_code

ENTRY(general_protection)
	pushl $do_general_protection
	jmp error_code

ENTRY(alignment_check)
	pushl $do_alignment_check
	jmp error_code

ENTRY(page_fault)
	pushl $do_page_fault
	jmp error_code
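/*
 * Two stub shapes: exceptions for which the CPU pushes a hardware error
 * code (invalid_TSS through page_fault above) push only the handler
 * address, while the others push an explicit $0 first, so that error_code
 * always finds a uniform two-word layout on top of the iret frame.
 */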

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	pushl $0
	pushl machine_check_vector
	jmp error_code
#endif
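/*
 * In machine_check above, pushl machine_check_vector (no '$') pushes the
 * value stored at that location - a function pointer selected at runtime
 * for the CPU type - rather than the symbol's address.
 */

ENTRY(spurious_interrupt_bug)
	pushl $0
	pushl $do_spurious_interrupt_bug
	jmp error_code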

#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)