/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/bootmem.h>
#include <trace/events/irq.h>

#include "internals.h"
/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:	the interrupt number
 * @desc:	description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(irq, desc);
	ack_bad_irq(irq);
}
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
	.irq	    = -1,
	.status	    = IRQ_DISABLED,
	.chip	    = &no_irq_chip,
	.handle_irq = handle_bad_irq,
	.depth	    = 1,
	.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};

void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
	void *ptr;

	if (slab_is_available())
		ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
				   GFP_ATOMIC, node);
	else
		ptr = alloc_bootmem_node(NODE_DATA(node),
					 nr * sizeof(*desc->kstat_irqs));

	/*
	 * Don't overwrite the old pointer if we could not get a new one;
	 * init_copy_kstat_irqs() could still use the old one.
	 */
	if (ptr) {
		printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
		desc->kstat_irqs = ptr;
	}
}
static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	spin_lock_init(&desc->lock);
	desc->irq = irq;
#ifdef CONFIG_SMP
	desc->node = node;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, node, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!alloc_desc_masks(desc, node, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	init_desc_masks(desc);
	arch_init_chip_data(desc, node);
}
/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

struct irq_desc **irq_desc_ptrs __read_mostly;

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.irq	    = -1,
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
	}
};

static unsigned int *kstat_irqs_legacy;
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int node;
	int i;

	init_irq_default_affinity();

	/* initialize nr_irqs based on nr_cpu_ids */
	arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);
	node = first_online_node;

	/* allocate irq_desc_ptrs array based on nr_irqs */
	irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);

	/* allocate based on nr_cpu_ids */
	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
					 sizeof(int), GFP_NOWAIT, node);

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq = i;
#ifdef CONFIG_SMP
		desc[i].node = node;
#endif
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		alloc_desc_masks(&desc[i], node, true);
		init_desc_masks(&desc[i]);
		irq_desc_ptrs[i] = desc + i;
	}

	for (i = legacy_count; i < nr_irqs; i++)
		irq_desc_ptrs[i] = NULL;

	return arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
	if (irq_desc_ptrs && irq < nr_irqs)
		return irq_desc_ptrs[irq];

	return NULL;
}
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	desc = irq_desc_ptrs[irq];
	if (desc)
		return desc;

	spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_desc_ptrs[irq];
	if (desc)
		goto out_unlock;

	if (slab_is_available())
		desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
	else
		desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));

	printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, node);

	irq_desc_ptrs[irq] = desc;

out_unlock:
	spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}
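/*
 * Usage note (illustrative sketch, not part of the original file):
 * architecture code that brings a new interrupt into existence, for
 * instance when setting up an MSI vector, is expected to call
 * irq_to_desc_alloc_node() before touching the descriptor so that the
 * sparse array entry exists. The irq number, node, chip and handler
 * chosen below are assumptions made only for this example:
 *
 *	struct irq_desc *desc;
 *
 *	desc = irq_to_desc_alloc_node(new_irq, cpu_to_node(cpu));
 *	if (!desc)
 *		return -ENOMEM;
 *	set_irq_chip_and_handler(new_irq, &msi_chip, handle_edge_irq);
 */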
#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status = IRQ_DISABLED,
		.chip = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth = 1,
		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int count;
	int i;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq = i;
		alloc_desc_masks(&desc[i], 0, true);
		init_desc_masks(&desc[i]);
		desc[i].kstat_irqs = kstat_irqs_all[i];
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */
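/*
 * Usage note (illustrative sketch, not part of the original file): with
 * CONFIG_SPARSE_IRQ the lookup above can legitimately return NULL for an
 * irq number whose descriptor was never allocated, so generic callers
 * are expected to check before dereferencing, roughly like this:
 *
 *	struct irq_desc *desc = irq_to_desc(irq);
 *
 *	if (!desc)
 *		return;
 *	spin_lock_irqsave(&desc->lock, flags);
 *	...
 *	spin_unlock_irqrestore(&desc->lock, flags);
 */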
void clear_kstat_irqs(struct irq_desc *desc)
{
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}
/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this itself.
 */
static void ack_bad(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	print_irq_desc(irq, desc);
	ack_bad_irq(irq);
}
/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
	return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= ack_bad,
	.end		= noop,
};
/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= noop,
	.mask		= noop,
	.unmask		= noop,
	.end		= noop,
};
/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}
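/*
 * Usage note (illustrative sketch, not part of the original file):
 * no_action is typically installed for interrupts that must be claimed
 * but need no handling of their own, e.g. the cascade line of a chained
 * PIC. The CASCADE_IRQ number and the action name below are assumptions
 * made only for this example:
 *
 *	static struct irqaction cascade_action = {
 *		.handler = no_action,
 *		.name	 = "cascade",
 *	};
 *
 *	void __init platform_init_irq(void)
 *	{
 *		setup_irq(CASCADE_IRQ, &cascade_action);
 *	}
 */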
static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
	if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
		return;

	printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
	       "but no thread function available.", irq, action->name);
}
/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		trace_irq_handler_entry(irq, action);
		ret = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, ret);

		switch (ret) {
		case IRQ_WAKE_THREAD:
			/*
			 * Set result to handled so the spurious check
			 * does not trigger.
			 */
			ret = IRQ_HANDLED;

			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			/*
			 * Wake up the handler thread for this
			 * action. In case the thread crashed and was
			 * killed we just pretend that we handled the
			 * interrupt. The hardirq handler above has
			 * disabled the device interrupt, so no irq
			 * storm is lurking.
			 */
			if (likely(!test_bit(IRQTF_DIED,
					     &action->thread_flags))) {
				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
				wake_up_process(action->thread);
			}

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			status |= action->flags;
			break;

		default:
			break;
		}

		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}
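/*
 * Usage note (illustrative sketch, not part of the original file): the
 * IRQ_WAKE_THREAD case above is what a driver triggers when it has
 * registered a threaded handler with request_threaded_irq(). The device
 * specific names below are assumptions made only for this example; the
 * quick handler runs in hard irq context, the thread function runs in
 * process context:
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		foo_process_data(foo);
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(foo->irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 */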
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ

#ifdef CONFIG_ENABLE_WARN_DEPRECATED
# warning __do_IRQ is deprecated. Please convert to proper flow handlers
#endif
/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack)
			desc->chip->ack(irq);
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	spin_lock(&desc->lock);
	if (desc->chip->ack)
		desc->chip->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}
#endif
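/*
 * Usage note (illustrative sketch, not part of the original file): on
 * architectures that still rely on __do_IRQ(), the low level interrupt
 * entry code decodes the hardware vector into an irq number and hands it
 * over, roughly along the lines of the simplified, assumed wrapper below:
 *
 *	asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
 *	{
 *		struct pt_regs *old_regs = set_irq_regs(regs);
 *
 *		irq_enter();
 *		__do_IRQ(irq);
 *		irq_exit();
 *		set_irq_regs(old_regs);
 *		return 1;
 *	}
 */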
void early_init_irq_lock_class(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	}
}
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}
EXPORT_SYMBOL(kstat_irqs_cpu);
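/*
 * Usage note (illustrative sketch, not part of the original file):
 * callers such as the /proc/interrupts code read the per-cpu counters
 * one cpu at a time; a system wide total for a single irq can be
 * derived by summing them. The helper name below is an assumption made
 * only for this example:
 *
 *	static unsigned int total_irq_count(unsigned int irq)
 *	{
 *		unsigned int sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += kstat_irqs_cpu(irq, cpu);
 *		return sum;
 *	}
 */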