Commit | Line | Data |
---|---|---|
6b39ba77 TG |
1 | /* |
2 | * Common interrupt code for 32 and 64 bit | |
3 | */ | |
4 | #include <linux/cpu.h> | |
5 | #include <linux/interrupt.h> | |
6 | #include <linux/kernel_stat.h> | |
7 | #include <linux/seq_file.h> | |
6a02e710 | 8 | #include <linux/smp.h> |
7c1d7cdc | 9 | #include <linux/ftrace.h> |
6b39ba77 | 10 | |
7b6aa335 | 11 | #include <asm/apic.h> |
6b39ba77 | 12 | #include <asm/io_apic.h> |
c3d80000 | 13 | #include <asm/irq.h> |
7c1d7cdc | 14 | #include <asm/idle.h> |
01ca79f1 | 15 | #include <asm/mce.h> |
2c1b284e | 16 | #include <asm/hw_irq.h> |
/* Count of IRQs that arrived on an unexpected/illegal vector (shown as "ERR"). */
atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Unexpected vectors only show up with SMP/APIC.  Every local
	 * APIC has just N irq slots per priority level, and an unacked
	 * ('hanging') IRQ pins one of them down - with enough stray
	 * vectors that can wedge the APIC completely.  So always ack;
	 * the ack only takes effect while the APIC is enabled. -AK
	 */
	ack_APIC_irq();
}
/* Per-cpu irq statistics accessor; irq_stat is declared in asm/hardirq.h. */
#define irq_stats(x) (&per_cpu(irq_stat, x))
6b39ba77 TG |
45 | /* |
46 | * /proc/interrupts printing: | |
47 | */ | |
7a81d9a7 | 48 | static int show_other_interrupts(struct seq_file *p, int prec) |
6b39ba77 TG |
49 | { |
50 | int j; | |
51 | ||
7a81d9a7 | 52 | seq_printf(p, "%*s: ", prec, "NMI"); |
6b39ba77 TG |
53 | for_each_online_cpu(j) |
54 | seq_printf(p, "%10u ", irq_stats(j)->__nmi_count); | |
55 | seq_printf(p, " Non-maskable interrupts\n"); | |
56 | #ifdef CONFIG_X86_LOCAL_APIC | |
7a81d9a7 | 57 | seq_printf(p, "%*s: ", prec, "LOC"); |
6b39ba77 TG |
58 | for_each_online_cpu(j) |
59 | seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs); | |
60 | seq_printf(p, " Local timer interrupts\n"); | |
474e56b8 JSR |
61 | |
62 | seq_printf(p, "%*s: ", prec, "SPU"); | |
63 | for_each_online_cpu(j) | |
64 | seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count); | |
65 | seq_printf(p, " Spurious interrupts\n"); | |
89ccf465 | 66 | seq_printf(p, "%*s: ", prec, "PMI"); |
241771ef IM |
67 | for_each_online_cpu(j) |
68 | seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs); | |
89ccf465 | 69 | seq_printf(p, " Performance monitoring interrupts\n"); |
e360adbe | 70 | seq_printf(p, "%*s: ", prec, "IWI"); |
b6276f35 | 71 | for_each_online_cpu(j) |
e360adbe PZ |
72 | seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs); |
73 | seq_printf(p, " IRQ work interrupts\n"); | |
6b39ba77 | 74 | #endif |
4a4de9c7 | 75 | if (x86_platform_ipi_callback) { |
59d13812 | 76 | seq_printf(p, "%*s: ", prec, "PLT"); |
acaabe79 | 77 | for_each_online_cpu(j) |
4a4de9c7 | 78 | seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis); |
acaabe79 DS |
79 | seq_printf(p, " Platform interrupts\n"); |
80 | } | |
6b39ba77 | 81 | #ifdef CONFIG_SMP |
7a81d9a7 | 82 | seq_printf(p, "%*s: ", prec, "RES"); |
6b39ba77 TG |
83 | for_each_online_cpu(j) |
84 | seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); | |
85 | seq_printf(p, " Rescheduling interrupts\n"); | |
7a81d9a7 | 86 | seq_printf(p, "%*s: ", prec, "CAL"); |
6b39ba77 TG |
87 | for_each_online_cpu(j) |
88 | seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); | |
89 | seq_printf(p, " Function call interrupts\n"); | |
7a81d9a7 | 90 | seq_printf(p, "%*s: ", prec, "TLB"); |
6b39ba77 TG |
91 | for_each_online_cpu(j) |
92 | seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); | |
93 | seq_printf(p, " TLB shootdowns\n"); | |
94 | #endif | |
0444c9bd | 95 | #ifdef CONFIG_X86_THERMAL_VECTOR |
7a81d9a7 | 96 | seq_printf(p, "%*s: ", prec, "TRM"); |
6b39ba77 TG |
97 | for_each_online_cpu(j) |
98 | seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count); | |
99 | seq_printf(p, " Thermal event interrupts\n"); | |
0444c9bd JB |
100 | #endif |
101 | #ifdef CONFIG_X86_MCE_THRESHOLD | |
7a81d9a7 | 102 | seq_printf(p, "%*s: ", prec, "THR"); |
6b39ba77 TG |
103 | for_each_online_cpu(j) |
104 | seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count); | |
105 | seq_printf(p, " Threshold APIC interrupts\n"); | |
01ca79f1 | 106 | #endif |
c1ebf835 | 107 | #ifdef CONFIG_X86_MCE |
01ca79f1 AK |
108 | seq_printf(p, "%*s: ", prec, "MCE"); |
109 | for_each_online_cpu(j) | |
110 | seq_printf(p, "%10u ", per_cpu(mce_exception_count, j)); | |
111 | seq_printf(p, " Machine check exceptions\n"); | |
ca84f696 AK |
112 | seq_printf(p, "%*s: ", prec, "MCP"); |
113 | for_each_online_cpu(j) | |
114 | seq_printf(p, "%10u ", per_cpu(mce_poll_count, j)); | |
115 | seq_printf(p, " Machine check polls\n"); | |
6b39ba77 | 116 | #endif |
7a81d9a7 | 117 | seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); |
6b39ba77 | 118 | #if defined(CONFIG_X86_IO_APIC) |
7a81d9a7 | 119 | seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count)); |
6b39ba77 TG |
120 | #endif |
121 | return 0; | |
122 | } | |
123 | ||
124 | int show_interrupts(struct seq_file *p, void *v) | |
125 | { | |
126 | unsigned long flags, any_count = 0; | |
7a81d9a7 | 127 | int i = *(loff_t *) v, j, prec; |
6b39ba77 TG |
128 | struct irqaction *action; |
129 | struct irq_desc *desc; | |
130 | ||
131 | if (i > nr_irqs) | |
132 | return 0; | |
133 | ||
7a81d9a7 JB |
134 | for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) |
135 | j *= 10; | |
136 | ||
6b39ba77 | 137 | if (i == nr_irqs) |
7a81d9a7 | 138 | return show_other_interrupts(p, prec); |
6b39ba77 TG |
139 | |
140 | /* print header */ | |
141 | if (i == 0) { | |
7a81d9a7 | 142 | seq_printf(p, "%*s", prec + 8, ""); |
6b39ba77 | 143 | for_each_online_cpu(j) |
e9f95e63 | 144 | seq_printf(p, "CPU%-8d", j); |
6b39ba77 TG |
145 | seq_putc(p, '\n'); |
146 | } | |
147 | ||
148 | desc = irq_to_desc(i); | |
0b8f1efa YL |
149 | if (!desc) |
150 | return 0; | |
151 | ||
239007b8 | 152 | raw_spin_lock_irqsave(&desc->lock, flags); |
6b39ba77 TG |
153 | for_each_online_cpu(j) |
154 | any_count |= kstat_irqs_cpu(i, j); | |
6b39ba77 TG |
155 | action = desc->action; |
156 | if (!action && !any_count) | |
157 | goto out; | |
158 | ||
7a81d9a7 | 159 | seq_printf(p, "%*d: ", prec, i); |
6b39ba77 TG |
160 | for_each_online_cpu(j) |
161 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | |
a3c08e5d | 162 | seq_printf(p, " %8s", desc->irq_data.chip->name); |
6b39ba77 TG |
163 | seq_printf(p, "-%-8s", desc->name); |
164 | ||
165 | if (action) { | |
166 | seq_printf(p, " %s", action->name); | |
167 | while ((action = action->next) != NULL) | |
168 | seq_printf(p, ", %s", action->name); | |
169 | } | |
170 | ||
171 | seq_putc(p, '\n'); | |
172 | out: | |
239007b8 | 173 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
6b39ba77 TG |
174 | return 0; |
175 | } | |
176 | ||
177 | /* | |
178 | * /proc/stat helpers | |
179 | */ | |
180 | u64 arch_irq_stat_cpu(unsigned int cpu) | |
181 | { | |
182 | u64 sum = irq_stats(cpu)->__nmi_count; | |
183 | ||
184 | #ifdef CONFIG_X86_LOCAL_APIC | |
185 | sum += irq_stats(cpu)->apic_timer_irqs; | |
474e56b8 | 186 | sum += irq_stats(cpu)->irq_spurious_count; |
241771ef | 187 | sum += irq_stats(cpu)->apic_perf_irqs; |
e360adbe | 188 | sum += irq_stats(cpu)->apic_irq_work_irqs; |
6b39ba77 | 189 | #endif |
4a4de9c7 DS |
190 | if (x86_platform_ipi_callback) |
191 | sum += irq_stats(cpu)->x86_platform_ipis; | |
6b39ba77 TG |
192 | #ifdef CONFIG_SMP |
193 | sum += irq_stats(cpu)->irq_resched_count; | |
194 | sum += irq_stats(cpu)->irq_call_count; | |
195 | sum += irq_stats(cpu)->irq_tlb_count; | |
196 | #endif | |
0444c9bd | 197 | #ifdef CONFIG_X86_THERMAL_VECTOR |
6b39ba77 | 198 | sum += irq_stats(cpu)->irq_thermal_count; |
0444c9bd JB |
199 | #endif |
200 | #ifdef CONFIG_X86_MCE_THRESHOLD | |
6b39ba77 | 201 | sum += irq_stats(cpu)->irq_threshold_count; |
8051dbd2 | 202 | #endif |
c1ebf835 | 203 | #ifdef CONFIG_X86_MCE |
8051dbd2 HS |
204 | sum += per_cpu(mce_exception_count, cpu); |
205 | sum += per_cpu(mce_poll_count, cpu); | |
6b39ba77 TG |
206 | #endif |
207 | return sum; | |
208 | } | |
209 | ||
210 | u64 arch_irq_stat(void) | |
211 | { | |
212 | u64 sum = atomic_read(&irq_err_count); | |
213 | ||
214 | #ifdef CONFIG_X86_IO_APIC | |
215 | sum += atomic_read(&irq_mis_count); | |
216 | #endif | |
217 | return sum; | |
218 | } | |
c3d80000 | 219 | |
7c1d7cdc JF |
220 | |
221 | /* | |
222 | * do_IRQ handles all normal device IRQ's (the special | |
223 | * SMP cross-CPU interrupts have their own specific | |
224 | * handlers). | |
225 | */ | |
226 | unsigned int __irq_entry do_IRQ(struct pt_regs *regs) | |
227 | { | |
228 | struct pt_regs *old_regs = set_irq_regs(regs); | |
229 | ||
230 | /* high bit used in ret_from_ code */ | |
231 | unsigned vector = ~regs->orig_ax; | |
232 | unsigned irq; | |
233 | ||
234 | exit_idle(); | |
235 | irq_enter(); | |
236 | ||
237 | irq = __get_cpu_var(vector_irq)[vector]; | |
238 | ||
239 | if (!handle_irq(irq, regs)) { | |
08306ce6 | 240 | ack_APIC_irq(); |
7c1d7cdc JF |
241 | |
242 | if (printk_ratelimit()) | |
edea7148 CG |
243 | pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n", |
244 | __func__, smp_processor_id(), vector, irq); | |
7c1d7cdc JF |
245 | } |
246 | ||
247 | irq_exit(); | |
248 | ||
249 | set_irq_regs(old_regs); | |
250 | return 1; | |
251 | } | |
252 | ||
acaabe79 | 253 | /* |
4a4de9c7 | 254 | * Handler for X86_PLATFORM_IPI_VECTOR. |
acaabe79 | 255 | */ |
4a4de9c7 | 256 | void smp_x86_platform_ipi(struct pt_regs *regs) |
acaabe79 DS |
257 | { |
258 | struct pt_regs *old_regs = set_irq_regs(regs); | |
259 | ||
260 | ack_APIC_irq(); | |
261 | ||
262 | exit_idle(); | |
263 | ||
264 | irq_enter(); | |
265 | ||
4a4de9c7 | 266 | inc_irq_stat(x86_platform_ipis); |
acaabe79 | 267 | |
4a4de9c7 DS |
268 | if (x86_platform_ipi_callback) |
269 | x86_platform_ipi_callback(); | |
acaabe79 DS |
270 | |
271 | irq_exit(); | |
272 | ||
273 | set_irq_regs(old_regs); | |
274 | } | |
/* vector_used_by_percpu_irq() is defined outside this file; exported here. */
EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		/* irq 2 is the cascade irq - never retarget it. */
		if (irq == 2)
			continue;

		/* interrupt's are disabled at this point */
		raw_spin_lock(&desc->lock);

		data = &desc->irq_data;
		affinity = data->affinity;
		if (!irq_has_action(irq) ||
		    cpumask_equal(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move. This cpu is going down and for
		 * non intr-remapping case, we can't wait till this interrupt
		 * arrives at this cpu before completing the irq move.
		 */
		irq_force_complete_move(irq);

		/* If no online cpu remains in the mask, fall back to all cpus. */
		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_all_mask;
		}

		if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask)
			data->chip->irq_mask(data);

		if (data->chip->irq_set_affinity)
			data->chip->irq_set_affinity(data, affinity, true);
		else if (!(warned++))
			set_affinity = 0;

		if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask)
			data->chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irr;

		if (__get_cpu_var(vector_irq)[vector] < 0)
			continue;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			irq = __get_cpu_var(vector_irq)[vector];

			/*
			 * BUGFIX: look up the descriptor for THIS irq.
			 * After the for_each_irq_desc() loop above, 'desc'
			 * is stale (it points at the last descriptor that
			 * loop visited), so locking desc->lock here used to
			 * take the wrong descriptor's lock and left the
			 * retrigger unprotected against that irq's handlers.
			 */
			desc = irq_to_desc(irq);
			data = &desc->irq_data;
			raw_spin_lock(&desc->lock);
			if (data->chip->irq_retrigger)
				data->chip->irq_retrigger(data);
			raw_spin_unlock(&desc->lock);
		}
	}
}
#endif