Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
1da177e4 LT |
2 | * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar |
3 | * | |
4 | * This file contains the lowest level x86-specific interrupt | |
5 | * entry, irq-stacks and irq statistics code. All the remaining | |
6 | * irq logic is done by the generic kernel/irq/ code and | |
7 | * by the x86-specific irq controller code. (e.g. i8259.c and | |
8 | * io_apic.c.) | |
9 | */ | |
10 | ||
1da177e4 LT |
11 | #include <linux/module.h> |
12 | #include <linux/seq_file.h> | |
13 | #include <linux/interrupt.h> | |
14 | #include <linux/kernel_stat.h> | |
f3705136 ZM |
15 | #include <linux/notifier.h> |
16 | #include <linux/cpu.h> | |
17 | #include <linux/delay.h> | |
1da177e4 | 18 | |
e05d723f TG |
19 | #include <asm/apic.h> |
20 | #include <asm/uaccess.h> | |
21 | ||
/*
 * Per-CPU interrupt statistics (irq_cpustat_t), cacheline-aligned to
 * avoid false sharing between CPUs; exported so the generic irq code
 * and modules can read the counters.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

/*
 * Per-CPU pointer to the pt_regs of the interrupt currently being
 * handled (set via set_irq_regs() in do_IRQ); exported for users of
 * get_irq_regs().
 */
DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);
1da177e4 LT |
28 | /* |
29 | * 'what should we do if we get a hw irq event on an illegal vector'. | |
30 | * each architecture has to answer this themselves. | |
31 | */ | |
32 | void ack_bad_irq(unsigned int irq) | |
33 | { | |
e05d723f TG |
34 | printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq); |
35 | ||
36 | #ifdef CONFIG_X86_LOCAL_APIC | |
37 | /* | |
38 | * Currently unexpected vectors happen only on SMP and APIC. | |
39 | * We _must_ ack these because every local APIC has only N | |
40 | * irq slots per priority level, and a 'hanging, unacked' IRQ | |
41 | * holds up an irq slot - in excessive cases (when multiple | |
42 | * unexpected vectors occur) that might lock up the APIC | |
43 | * completely. | |
44 | * But only ack when the APIC is enabled -AK | |
45 | */ | |
46 | if (cpu_has_apic) | |
47 | ack_APIC_irq(); | |
1da177e4 | 48 | #endif |
e05d723f | 49 | } |

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack).
 * With 4K task stacks, interrupts run on separate per-CPU stacks;
 * each context overlays a thread_info at the bottom of a
 * THREAD_SIZE-sized stack area, mirroring a normal task stack layout.
 */
union irq_ctx {
	struct thread_info      tinfo;
	u32                     stack[THREAD_SIZE/sizeof(u32)];
};

/* Per-CPU stacks for hardirq and softirq processing; written at CPU
 * bring-up and read on every interrupt, hence __read_mostly. */
static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif
63 | ||
64 | /* | |
65 | * do_IRQ handles all normal device IRQ's (the special | |
66 | * SMP cross-CPU interrupts have their own specific | |
67 | * handlers). | |
68 | */ | |
69 | fastcall unsigned int do_IRQ(struct pt_regs *regs) | |
70 | { | |
7d12e780 | 71 | struct pt_regs *old_regs; |
19eadf98 RR |
72 | /* high bit used in ret_from_ code */ |
73 | int irq = ~regs->orig_eax; | |
f5b9ed7a | 74 | struct irq_desc *desc = irq_desc + irq; |
1da177e4 LT |
75 | #ifdef CONFIG_4KSTACKS |
76 | union irq_ctx *curctx, *irqctx; | |
77 | u32 *isp; | |
78 | #endif | |
79 | ||
a052b68b AM |
80 | if (unlikely((unsigned)irq >= NR_IRQS)) { |
81 | printk(KERN_EMERG "%s: cannot handle IRQ %d\n", | |
82 | __FUNCTION__, irq); | |
83 | BUG(); | |
84 | } | |
85 | ||
7d12e780 | 86 | old_regs = set_irq_regs(regs); |
1da177e4 LT |
87 | irq_enter(); |
88 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | |
89 | /* Debugging check for stack overflow: is there less than 1KB free? */ | |
90 | { | |
91 | long esp; | |
92 | ||
93 | __asm__ __volatile__("andl %%esp,%0" : | |
94 | "=r" (esp) : "0" (THREAD_SIZE - 1)); | |
95 | if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) { | |
96 | printk("do_IRQ: stack overflow: %ld\n", | |
97 | esp - sizeof(struct thread_info)); | |
98 | dump_stack(); | |
99 | } | |
100 | } | |
101 | #endif | |
102 | ||
103 | #ifdef CONFIG_4KSTACKS | |
104 | ||
105 | curctx = (union irq_ctx *) current_thread_info(); | |
106 | irqctx = hardirq_ctx[smp_processor_id()]; | |
107 | ||
108 | /* | |
109 | * this is where we switch to the IRQ stack. However, if we are | |
110 | * already using the IRQ stack (because we interrupted a hardirq | |
111 | * handler) we can't do that and just have to keep using the | |
112 | * current stack (which is the irq stack already after all) | |
113 | */ | |
114 | if (curctx != irqctx) { | |
7d12e780 | 115 | int arg1, arg2, ebx; |
1da177e4 LT |
116 | |
117 | /* build the stack frame on the IRQ stack */ | |
118 | isp = (u32*) ((char*)irqctx + sizeof(*irqctx)); | |
119 | irqctx->tinfo.task = curctx->tinfo.task; | |
120 | irqctx->tinfo.previous_esp = current_stack_pointer; | |
121 |