Commit | Line | Data |
---|---|---|
dd87eb3a TG |
1 | /* |
2 | * linux/kernel/irq/chip.c | |
3 | * | |
4 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar | |
5 | * Copyright (C) 2005-2006, Thomas Gleixner, Russell King | |
6 | * | |
7 | * This file contains the core interrupt handling code, for irq-chip | |
8 | * based architectures. | |
9 | * | |
10 | * Detailed information is available in Documentation/DocBook/genericirq | |
11 | */ | |
12 | ||
13 | #include <linux/irq.h> | |
7fe3730d | 14 | #include <linux/msi.h> |
dd87eb3a TG |
15 | #include <linux/module.h> |
16 | #include <linux/interrupt.h> | |
17 | #include <linux/kernel_stat.h> | |
18 | ||
19 | #include "internals.h" | |
20 | ||
21 | /** | |
a0cd9ca2 | 22 | * irq_set_chip - set the irq chip for an irq |
dd87eb3a TG |
23 | * @irq: irq number |
24 | * @chip: pointer to irq chip description structure | |
25 | */ | |
a0cd9ca2 | 26 | int irq_set_chip(unsigned int irq, struct irq_chip *chip) |
dd87eb3a | 27 | { |
dd87eb3a | 28 | unsigned long flags; |
31d9d9b6 | 29 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
dd87eb3a | 30 | |
02725e74 | 31 | if (!desc) |
dd87eb3a | 32 | return -EINVAL; |
dd87eb3a TG |
33 | |
34 | if (!chip) | |
35 | chip = &no_irq_chip; | |
36 | ||
6b8ff312 | 37 | desc->irq_data.chip = chip; |
02725e74 | 38 | irq_put_desc_unlock(desc, flags); |
d72274e5 DD |
39 | /* |
40 | * For !CONFIG_SPARSE_IRQ make the irq show up in | |
41 | * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is | |
42 | * already marked, and this call is harmless. | |
43 | */ | |
44 | irq_reserve_irq(irq); | |
dd87eb3a TG |
45 | return 0; |
46 | } | |
a0cd9ca2 | 47 | EXPORT_SYMBOL(irq_set_chip); |
dd87eb3a TG |
48 | |
49 | /** | |
a0cd9ca2 | 50 | * irq_set_type - set the irq trigger type for an irq |
dd87eb3a | 51 | * @irq: irq number |
0c5d1eb7 | 52 | * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h |
dd87eb3a | 53 | */ |
a0cd9ca2 | 54 | int irq_set_irq_type(unsigned int irq, unsigned int type) |
dd87eb3a | 55 | { |
dd87eb3a | 56 | unsigned long flags; |
31d9d9b6 | 57 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
02725e74 | 58 | int ret = 0; |
dd87eb3a | 59 | |
02725e74 TG |
60 | if (!desc) |
61 | return -EINVAL; | |
dd87eb3a | 62 | |
f2b662da | 63 | type &= IRQ_TYPE_SENSE_MASK; |
02725e74 TG |
64 | if (type != IRQ_TYPE_NONE) |
65 | ret = __irq_set_trigger(desc, irq, type); | |
66 | irq_put_desc_busunlock(desc, flags); | |
dd87eb3a TG |
67 | return ret; |
68 | } | |
a0cd9ca2 | 69 | EXPORT_SYMBOL(irq_set_irq_type); |
dd87eb3a TG |
70 | |
71 | /** | |
a0cd9ca2 | 72 | * irq_set_handler_data - set irq handler data for an irq |
dd87eb3a TG |
73 | * @irq: Interrupt number |
74 | * @data: Pointer to interrupt specific data | |
75 | * | |
76 | * Set the hardware irq controller data for an irq | |
77 | */ | |
a0cd9ca2 | 78 | int irq_set_handler_data(unsigned int irq, void *data) |
dd87eb3a | 79 | { |
dd87eb3a | 80 | unsigned long flags; |
31d9d9b6 | 81 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
dd87eb3a | 82 | |
02725e74 | 83 | if (!desc) |
dd87eb3a | 84 | return -EINVAL; |
6b8ff312 | 85 | desc->irq_data.handler_data = data; |
02725e74 | 86 | irq_put_desc_unlock(desc, flags); |
dd87eb3a TG |
87 | return 0; |
88 | } | |
a0cd9ca2 | 89 | EXPORT_SYMBOL(irq_set_handler_data); |
dd87eb3a | 90 | |
5b912c10 | 91 | /** |
a0cd9ca2 | 92 | * irq_set_msi_desc - set MSI descriptor data for an irq |
5b912c10 | 93 | * @irq: Interrupt number |
472900b8 | 94 | * @entry: Pointer to MSI descriptor data |
5b912c10 | 95 | * |
24b26d42 | 96 | * Set the MSI descriptor entry for an irq |
5b912c10 | 97 | */ |
a0cd9ca2 | 98 | int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry) |
5b912c10 | 99 | { |
5b912c10 | 100 | unsigned long flags; |
31d9d9b6 | 101 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
5b912c10 | 102 | |
02725e74 | 103 | if (!desc) |
5b912c10 | 104 | return -EINVAL; |
6b8ff312 | 105 | desc->irq_data.msi_desc = entry; |
7fe3730d ME |
106 | if (entry) |
107 | entry->irq = irq; | |
02725e74 | 108 | irq_put_desc_unlock(desc, flags); |
5b912c10 EB |
109 | return 0; |
110 | } | |
111 | ||
dd87eb3a | 112 | /** |
a0cd9ca2 | 113 | * irq_set_chip_data - set irq chip data for an irq |
dd87eb3a TG |
114 | * @irq: Interrupt number |
115 | * @data: Pointer to chip specific data | |
116 | * | |
117 | * Set the hardware irq chip data for an irq | |
118 | */ | |
a0cd9ca2 | 119 | int irq_set_chip_data(unsigned int irq, void *data) |
dd87eb3a | 120 | { |
dd87eb3a | 121 | unsigned long flags; |
31d9d9b6 | 122 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
dd87eb3a | 123 | |
02725e74 | 124 | if (!desc) |
dd87eb3a | 125 | return -EINVAL; |
6b8ff312 | 126 | desc->irq_data.chip_data = data; |
02725e74 | 127 | irq_put_desc_unlock(desc, flags); |
dd87eb3a TG |
128 | return 0; |
129 | } | |
a0cd9ca2 | 130 | EXPORT_SYMBOL(irq_set_chip_data); |
dd87eb3a | 131 | |
f303a6dd TG |
132 | struct irq_data *irq_get_irq_data(unsigned int irq) |
133 | { | |
134 | struct irq_desc *desc = irq_to_desc(irq); | |
135 | ||
136 | return desc ? &desc->irq_data : NULL; | |
137 | } | |
138 | EXPORT_SYMBOL_GPL(irq_get_irq_data); | |
139 | ||
c1594b77 TG |
140 | static void irq_state_clr_disabled(struct irq_desc *desc) |
141 | { | |
801a0e9a | 142 | irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); |
c1594b77 TG |
143 | } |
144 | ||
145 | static void irq_state_set_disabled(struct irq_desc *desc) | |
146 | { | |
801a0e9a | 147 | irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); |
c1594b77 TG |
148 | } |
149 | ||
6e40262e TG |
150 | static void irq_state_clr_masked(struct irq_desc *desc) |
151 | { | |
32f4125e | 152 | irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); |
6e40262e TG |
153 | } |
154 | ||
155 | static void irq_state_set_masked(struct irq_desc *desc) | |
156 | { | |
32f4125e | 157 | irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); |
6e40262e TG |
158 | } |
159 | ||
46999238 TG |
160 | int irq_startup(struct irq_desc *desc) |
161 | { | |
c1594b77 | 162 | irq_state_clr_disabled(desc); |
46999238 TG |
163 | desc->depth = 0; |
164 | ||
3aae994f TG |
165 | if (desc->irq_data.chip->irq_startup) { |
166 | int ret = desc->irq_data.chip->irq_startup(&desc->irq_data); | |
6e40262e | 167 | irq_state_clr_masked(desc); |
3aae994f TG |
168 | return ret; |
169 | } | |
46999238 | 170 | |
87923470 | 171 | irq_enable(desc); |
46999238 TG |
172 | return 0; |
173 | } | |
174 | ||
175 | void irq_shutdown(struct irq_desc *desc) | |
176 | { | |
c1594b77 | 177 | irq_state_set_disabled(desc); |
46999238 | 178 | desc->depth = 1; |
50f7c032 TG |
179 | if (desc->irq_data.chip->irq_shutdown) |
180 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); | |
ed585a65 | 181 | else if (desc->irq_data.chip->irq_disable) |
50f7c032 TG |
182 | desc->irq_data.chip->irq_disable(&desc->irq_data); |
183 | else | |
184 | desc->irq_data.chip->irq_mask(&desc->irq_data); | |
6e40262e | 185 | irq_state_set_masked(desc); |
46999238 TG |
186 | } |
187 | ||
87923470 TG |
188 | void irq_enable(struct irq_desc *desc) |
189 | { | |
c1594b77 | 190 | irq_state_clr_disabled(desc); |
50f7c032 TG |
191 | if (desc->irq_data.chip->irq_enable) |
192 | desc->irq_data.chip->irq_enable(&desc->irq_data); | |
193 | else | |
194 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
6e40262e | 195 | irq_state_clr_masked(desc); |
dd87eb3a TG |
196 | } |
197 | ||
50f7c032 | 198 | void irq_disable(struct irq_desc *desc) |
89d694b9 | 199 | { |
c1594b77 | 200 | irq_state_set_disabled(desc); |
50f7c032 TG |
201 | if (desc->irq_data.chip->irq_disable) { |
202 | desc->irq_data.chip->irq_disable(&desc->irq_data); | |
a61d8258 | 203 | irq_state_set_masked(desc); |
50f7c032 | 204 | } |
89d694b9 TG |
205 | } |
206 | ||
31d9d9b6 MZ |
207 | void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) |
208 | { | |
209 | if (desc->irq_data.chip->irq_enable) | |
210 | desc->irq_data.chip->irq_enable(&desc->irq_data); | |
211 | else | |
212 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
213 | cpumask_set_cpu(cpu, desc->percpu_enabled); | |
214 | } | |
215 | ||
216 | void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu) | |
217 | { | |
218 | if (desc->irq_data.chip->irq_disable) | |
219 | desc->irq_data.chip->irq_disable(&desc->irq_data); | |
220 | else | |
221 | desc->irq_data.chip->irq_mask(&desc->irq_data); | |
222 | cpumask_clear_cpu(cpu, desc->percpu_enabled); | |
223 | } | |
224 | ||
9205e31d | 225 | static inline void mask_ack_irq(struct irq_desc *desc) |
dd87eb3a | 226 | { |
9205e31d TG |
227 | if (desc->irq_data.chip->irq_mask_ack) |
228 | desc->irq_data.chip->irq_mask_ack(&desc->irq_data); | |
dd87eb3a | 229 | else { |
e2c0f8ff | 230 | desc->irq_data.chip->irq_mask(&desc->irq_data); |
22a49163 TG |
231 | if (desc->irq_data.chip->irq_ack) |
232 | desc->irq_data.chip->irq_ack(&desc->irq_data); | |
dd87eb3a | 233 | } |
6e40262e | 234 | irq_state_set_masked(desc); |
0b1adaa0 TG |
235 | } |
236 | ||
d4d5e089 | 237 | void mask_irq(struct irq_desc *desc) |
0b1adaa0 | 238 | { |
e2c0f8ff TG |
239 | if (desc->irq_data.chip->irq_mask) { |
240 | desc->irq_data.chip->irq_mask(&desc->irq_data); | |
6e40262e | 241 | irq_state_set_masked(desc); |
0b1adaa0 TG |
242 | } |
243 | } | |
244 | ||
d4d5e089 | 245 | void unmask_irq(struct irq_desc *desc) |
0b1adaa0 | 246 | { |
0eda58b7 TG |
247 | if (desc->irq_data.chip->irq_unmask) { |
248 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
6e40262e | 249 | irq_state_clr_masked(desc); |
0b1adaa0 | 250 | } |
dd87eb3a TG |
251 | } |
252 | ||
399b5da2 TG |
253 | /* |
254 | * handle_nested_irq - Handle a nested irq from a irq thread | |
255 | * @irq: the interrupt number | |
256 | * | |
257 | * Handle interrupts which are nested into a threaded interrupt | |
258 | * handler. The handler function is called inside the calling | |
259 | * threads context. | |
260 | */ | |
261 | void handle_nested_irq(unsigned int irq) | |
262 | { | |
263 | struct irq_desc *desc = irq_to_desc(irq); | |
264 | struct irqaction *action; | |
265 | irqreturn_t action_ret; | |
266 | ||
267 | might_sleep(); | |
268 | ||
239007b8 | 269 | raw_spin_lock_irq(&desc->lock); |
399b5da2 TG |
270 | |
271 | kstat_incr_irqs_this_cpu(irq, desc); | |
272 | ||
273 | action = desc->action; | |
32f4125e | 274 | if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) |
399b5da2 TG |
275 | goto out_unlock; |
276 | ||
32f4125e | 277 | irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
239007b8 | 278 | raw_spin_unlock_irq(&desc->lock); |
399b5da2 TG |
279 | |
280 | action_ret = action->thread_fn(action->irq, action->dev_id); | |
281 | if (!noirqdebug) | |
282 | note_interrupt(irq, desc, action_ret); | |
283 | ||
239007b8 | 284 | raw_spin_lock_irq(&desc->lock); |
32f4125e | 285 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
399b5da2 TG |
286 | |
287 | out_unlock: | |
239007b8 | 288 | raw_spin_unlock_irq(&desc->lock); |
399b5da2 TG |
289 | } |
290 | EXPORT_SYMBOL_GPL(handle_nested_irq); | |
291 | ||
fe200ae4 TG |
292 | static bool irq_check_poll(struct irq_desc *desc) |
293 | { | |
6954b75b | 294 | if (!(desc->istate & IRQS_POLL_INPROGRESS)) |
fe200ae4 TG |
295 | return false; |
296 | return irq_wait_for_poll(desc); | |
297 | } | |
298 | ||
dd87eb3a TG |
299 | /** |
300 | * handle_simple_irq - Simple and software-decoded IRQs. | |
301 | * @irq: the interrupt number | |
302 | * @desc: the interrupt description structure for this irq | |
dd87eb3a TG |
303 | * |
304 | * Simple interrupts are either sent from a demultiplexing interrupt | |
305 | * handler or come from hardware, where no interrupt hardware control | |
306 | * is necessary. | |
307 | * | |
308 | * Note: The caller is expected to handle the ack, clear, mask and | |
309 | * unmask issues if necessary. | |
310 | */ | |
7ad5b3a5 | 311 | void |
7d12e780 | 312 | handle_simple_irq(unsigned int irq, struct irq_desc *desc) |
dd87eb3a | 313 | { |
239007b8 | 314 | raw_spin_lock(&desc->lock); |
dd87eb3a | 315 | |
32f4125e | 316 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) |
fe200ae4 TG |
317 | if (!irq_check_poll(desc)) |
318 | goto out_unlock; | |
319 | ||
163ef309 | 320 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
d6c88a50 | 321 | kstat_incr_irqs_this_cpu(irq, desc); |
dd87eb3a | 322 | |
32f4125e | 323 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) |
dd87eb3a TG |
324 | goto out_unlock; |
325 | ||
107781e7 | 326 | handle_irq_event(desc); |
dd87eb3a | 327 | |
dd87eb3a | 328 | out_unlock: |
239007b8 | 329 | raw_spin_unlock(&desc->lock); |
dd87eb3a | 330 | } |
edf76f83 | 331 | EXPORT_SYMBOL_GPL(handle_simple_irq); |
dd87eb3a TG |
332 | |
333 | /** | |
334 | * handle_level_irq - Level type irq handler | |
335 | * @irq: the interrupt number | |
336 | * @desc: the interrupt description structure for this irq | |
dd87eb3a TG |
337 | * |
338 | * Level type interrupts are active as long as the hardware line has | |
339 | * the active level. This may require to mask the interrupt and unmask | |
340 | * it after the associated handler has acknowledged the device, so the | |
341 | * interrupt line is back to inactive. | |
342 | */ | |
7ad5b3a5 | 343 | void |
7d12e780 | 344 | handle_level_irq(unsigned int irq, struct irq_desc *desc) |
dd87eb3a | 345 | { |
239007b8 | 346 | raw_spin_lock(&desc->lock); |
9205e31d | 347 | mask_ack_irq(desc); |
dd87eb3a | 348 | |
32f4125e | 349 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) |
fe200ae4 TG |
350 | if (!irq_check_poll(desc)) |
351 | goto out_unlock; | |
352 | ||
163ef309 | 353 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
d6c88a50 | 354 | kstat_incr_irqs_this_cpu(irq, desc); |
dd87eb3a TG |
355 | |
356 | /* | |
357 | * If its disabled or no action available | |
358 | * keep it masked and get out of here | |
359 | */ | |
32f4125e | 360 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) |
86998aa6 | 361 | goto out_unlock; |
dd87eb3a | 362 | |
1529866c | 363 | handle_irq_event(desc); |
b25c340c | 364 | |
32f4125e | 365 | if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT)) |
0eda58b7 | 366 | unmask_irq(desc); |
86998aa6 | 367 | out_unlock: |
239007b8 | 368 | raw_spin_unlock(&desc->lock); |
dd87eb3a | 369 | } |
14819ea1 | 370 | EXPORT_SYMBOL_GPL(handle_level_irq); |
dd87eb3a | 371 | |
78129576 TG |
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
/* Invoke the optional architecture preflow handler, if one is installed */
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
381 | ||
dd87eb3a | 382 | /** |
47c2a3aa | 383 | * handle_fasteoi_irq - irq handler for transparent controllers |
dd87eb3a TG |
384 | * @irq: the interrupt number |
385 | * @desc: the interrupt description structure for this irq | |
dd87eb3a | 386 | * |
47c2a3aa | 387 | * Only a single callback will be issued to the chip: an ->eoi() |
dd87eb3a TG |
388 | * call when the interrupt has been serviced. This enables support |
389 | * for modern forms of interrupt handlers, which handle the flow | |
390 | * details in hardware, transparently. | |
391 | */ | |
7ad5b3a5 | 392 | void |
7d12e780 | 393 | handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) |
dd87eb3a | 394 | { |
239007b8 | 395 | raw_spin_lock(&desc->lock); |
dd87eb3a | 396 | |
32f4125e | 397 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) |
fe200ae4 TG |
398 | if (!irq_check_poll(desc)) |
399 | goto out; | |
dd87eb3a | 400 | |
163ef309 | 401 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
d6c88a50 | 402 | kstat_incr_irqs_this_cpu(irq, desc); |
dd87eb3a TG |
403 | |
404 | /* | |
405 | * If its disabled or no action available | |
76d21601 | 406 | * then mask it and get out of here: |
dd87eb3a | 407 | */ |
32f4125e | 408 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
2a0d6fb3 | 409 | desc->istate |= IRQS_PENDING; |
e2c0f8ff | 410 | mask_irq(desc); |
dd87eb3a | 411 | goto out; |
98bb244b | 412 | } |
c69e3758 TG |
413 | |
414 | if (desc->istate & IRQS_ONESHOT) | |
415 | mask_irq(desc); | |
416 | ||
78129576 | 417 | preflow_handler(desc); |
a7ae4de5 | 418 | handle_irq_event(desc); |
77694b40 TG |
419 | |
420 | out_eoi: | |
0c5c1557 | 421 | desc->irq_data.chip->irq_eoi(&desc->irq_data); |
77694b40 | 422 | out_unlock: |
239007b8 | 423 | raw_spin_unlock(&desc->lock); |
77694b40 TG |
424 | return; |
425 | out: | |
426 | if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED)) | |
427 | goto out_eoi; | |
428 | goto out_unlock; | |
dd87eb3a TG |
429 | } |
430 | ||
431 | /** | |
432 | * handle_edge_irq - edge type IRQ handler | |
433 | * @irq: the interrupt number | |
434 | * @desc: the interrupt description structure for this irq | |
dd87eb3a TG |
435 | * |
436 | * Interrupt occures on the falling and/or rising edge of a hardware | |
25985edc | 437 | * signal. The occurrence is latched into the irq controller hardware |
dd87eb3a TG |
438 | * and must be acked in order to be reenabled. After the ack another |
439 | * interrupt can happen on the same source even before the first one | |
dfff0615 | 440 | * is handled by the associated event handler. If this happens it |
dd87eb3a TG |
441 | * might be necessary to disable (mask) the interrupt depending on the |
442 | * controller hardware. This requires to reenable the interrupt inside | |
443 | * of the loop which handles the interrupts which have arrived while | |
444 | * the handler was running. If all pending interrupts are handled, the | |
445 | * loop is left. | |
446 | */ | |
7ad5b3a5 | 447 | void |
7d12e780 | 448 | handle_edge_irq(unsigned int irq, struct irq_desc *desc) |
dd87eb3a | 449 | { |
239007b8 | 450 | raw_spin_lock(&desc->lock); |
dd87eb3a | 451 | |
163ef309 | 452 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
dd87eb3a TG |
453 | /* |
454 | * If we're currently running this IRQ, or its disabled, | |
455 | * we shouldn't process the IRQ. Mark it pending, handle | |
456 | * the necessary masking and go out | |
457 | */ | |
32f4125e TG |
458 | if (unlikely(irqd_irq_disabled(&desc->irq_data) || |
459 | irqd_irq_inprogress(&desc->irq_data) || !desc->action)) { | |
fe200ae4 | 460 | if (!irq_check_poll(desc)) { |
2a0d6fb3 | 461 | desc->istate |= IRQS_PENDING; |
fe200ae4 TG |
462 | mask_ack_irq(desc); |
463 | goto out_unlock; | |
464 | } | |
dd87eb3a | 465 | } |
d6c88a50 | 466 | kstat_incr_irqs_this_cpu(irq, desc); |
dd87eb3a TG |
467 | |
468 | /* Start handling the irq */ | |
22a49163 | 469 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
dd87eb3a | 470 | |
dd87eb3a | 471 | do { |
a60a5dc2 | 472 | if (unlikely(!desc->action)) { |
e2c0f8ff | 473 | mask_irq(desc); |
dd87eb3a TG |
474 | goto out_unlock; |
475 | } | |
476 | ||
477 | /* | |
478 | * When another irq arrived while we were handling | |
479 | * one, we could have masked the irq. | |
480 | * Renable it, if it was not disabled in meantime. | |
481 | */ | |
2a0d6fb3 | 482 | if (unlikely(desc->istate & IRQS_PENDING)) { |
32f4125e TG |
483 | if (!irqd_irq_disabled(&desc->irq_data) && |
484 | irqd_irq_masked(&desc->irq_data)) | |
c1594b77 | 485 | unmask_irq(desc); |
dd87eb3a TG |
486 | } |
487 | ||
a60a5dc2 | 488 | handle_irq_event(desc); |
dd87eb3a | 489 | |
2a0d6fb3 | 490 | } while ((desc->istate & IRQS_PENDING) && |
32f4125e | 491 | !irqd_irq_disabled(&desc->irq_data)); |
dd87eb3a | 492 | |
dd87eb3a | 493 | out_unlock: |
239007b8 | 494 | raw_spin_unlock(&desc->lock); |
dd87eb3a TG |
495 | } |
496 | ||
0521c8fb TG |
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Similar as the above handle_edge_irq, but using eoi and w/o the
 *	mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif
541 | ||
dd87eb3a | 542 | /** |
24b26d42 | 543 | * handle_percpu_irq - Per CPU local irq handler |
dd87eb3a TG |
544 | * @irq: the interrupt number |
545 | * @desc: the interrupt description structure for this irq | |
dd87eb3a TG |
546 | * |
547 | * Per CPU interrupts on SMP machines without locking requirements | |
548 | */ | |
7ad5b3a5 | 549 | void |
7d12e780 | 550 | handle_percpu_irq(unsigned int irq, struct irq_desc *desc) |
dd87eb3a | 551 | { |
35e857cb | 552 | struct irq_chip *chip = irq_desc_get_chip(desc); |
dd87eb3a | 553 | |
d6c88a50 | 554 | kstat_incr_irqs_this_cpu(irq, desc); |
dd87eb3a | 555 | |
849f061c TG |
556 | if (chip->irq_ack) |
557 | chip->irq_ack(&desc->irq_data); | |
dd87eb3a | 558 | |
849f061c | 559 | handle_irq_event_percpu(desc, desc->action); |
dd87eb3a | 560 | |
849f061c TG |
561 | if (chip->irq_eoi) |
562 | chip->irq_eoi(&desc->irq_data); | |
dd87eb3a TG |
563 | } |
564 | ||
31d9d9b6 MZ |
565 | /** |
566 | * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids | |
567 | * @irq: the interrupt number | |
568 | * @desc: the interrupt description structure for this irq | |
569 | * | |
570 | * Per CPU interrupts on SMP machines without locking requirements. Same as | |
571 | * handle_percpu_irq() above but with the following extras: | |
572 | * | |
573 | * action->percpu_dev_id is a pointer to percpu variables which | |
574 | * contain the real device id for the cpu on which this handler is | |
575 | * called | |
576 | */ | |
577 | void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc) | |
578 | { | |
579 | struct irq_chip *chip = irq_desc_get_chip(desc); | |
580 | struct irqaction *action = desc->action; | |
581 | void *dev_id = __this_cpu_ptr(action->percpu_dev_id); | |
582 | irqreturn_t res; | |
583 | ||
584 | kstat_incr_irqs_this_cpu(irq, desc); | |
585 | ||
586 | if (chip->irq_ack) | |
587 | chip->irq_ack(&desc->irq_data); | |
588 | ||
589 | trace_irq_handler_entry(irq, action); | |
590 | res = action->handler(irq, dev_id); | |
591 | trace_irq_handler_exit(irq, action, res); | |
592 | ||
593 | if (chip->irq_eoi) | |
594 | chip->irq_eoi(&desc->irq_data); | |
595 | } | |
596 | ||
dd87eb3a | 597 | void |
3836ca08 | 598 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, |
a460e745 | 599 | const char *name) |
dd87eb3a | 600 | { |
dd87eb3a | 601 | unsigned long flags; |
31d9d9b6 | 602 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); |
dd87eb3a | 603 | |
02725e74 | 604 | if (!desc) |
dd87eb3a | 605 | return; |
dd87eb3a | 606 | |
091738a2 | 607 | if (!handle) { |
dd87eb3a | 608 | handle = handle_bad_irq; |
091738a2 TG |
609 | } else { |
610 | if (WARN_ON(desc->irq_data.chip == &no_irq_chip)) | |
02725e74 | 611 | goto out; |
f8b5473f | 612 | } |
dd87eb3a | 613 | |
dd87eb3a TG |
614 | /* Uninstall? */ |
615 | if (handle == handle_bad_irq) { | |
6b8ff312 | 616 | if (desc->irq_data.chip != &no_irq_chip) |
9205e31d | 617 | mask_ack_irq(desc); |
801a0e9a | 618 | irq_state_set_disabled(desc); |
dd87eb3a TG |
619 | desc->depth = 1; |
620 | } | |
621 | desc->handle_irq = handle; | |
a460e745 | 622 | desc->name = name; |
dd87eb3a TG |
623 | |
624 | if (handle != handle_bad_irq && is_chained) { | |
1ccb4e61 TG |
625 | irq_settings_set_noprobe(desc); |
626 | irq_settings_set_norequest(desc); | |
7f1b1244 | 627 | irq_settings_set_nothread(desc); |
46999238 | 628 | irq_startup(desc); |
dd87eb3a | 629 | } |
02725e74 TG |
630 | out: |
631 | irq_put_desc_busunlock(desc, flags); | |
dd87eb3a | 632 | } |
3836ca08 | 633 | EXPORT_SYMBOL_GPL(__irq_set_handler); |
dd87eb3a TG |
634 | |
635 | void | |
3836ca08 | 636 | irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, |
a460e745 | 637 | irq_flow_handler_t handle, const char *name) |
dd87eb3a | 638 | { |
35e857cb | 639 | irq_set_chip(irq, chip); |
3836ca08 | 640 | __irq_set_handler(irq, handle, 0, name); |
dd87eb3a | 641 | } |
46f4f8f6 | 642 | |
44247184 | 643 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) |
46f4f8f6 | 644 | { |
46f4f8f6 | 645 | unsigned long flags; |
31d9d9b6 | 646 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
46f4f8f6 | 647 | |
44247184 | 648 | if (!desc) |
46f4f8f6 | 649 | return; |
a005677b TG |
650 | irq_settings_clr_and_set(desc, clr, set); |
651 | ||
876dbd4c | 652 | irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | |
e1ef8241 | 653 | IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT); |
a005677b TG |
654 | if (irq_settings_has_no_balance_set(desc)) |
655 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); | |
656 | if (irq_settings_is_per_cpu(desc)) | |
657 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | |
e1ef8241 TG |
658 | if (irq_settings_can_move_pcntxt(desc)) |
659 | irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); | |
0ef5ca1e TG |
660 | if (irq_settings_is_level(desc)) |
661 | irqd_set(&desc->irq_data, IRQD_LEVEL); | |
a005677b | 662 | |
876dbd4c TG |
663 | irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc)); |
664 | ||
02725e74 | 665 | irq_put_desc_unlock(desc, flags); |
46f4f8f6 | 666 | } |
edf76f83 | 667 | EXPORT_SYMBOL_GPL(irq_modify_status); |
0fdb4b25 DD |
668 | |
669 | /** | |
670 | * irq_cpu_online - Invoke all irq_cpu_online functions. | |
671 | * | |
672 | * Iterate through all irqs and invoke the chip.irq_cpu_online() | |
673 | * for each. | |
674 | */ | |
675 | void irq_cpu_online(void) | |
676 | { | |
677 | struct irq_desc *desc; | |
678 | struct irq_chip *chip; | |
679 | unsigned long flags; | |
680 | unsigned int irq; | |
681 | ||
682 | for_each_active_irq(irq) { | |
683 | desc = irq_to_desc(irq); | |
684 | if (!desc) | |
685 | continue; | |
686 | ||
687 | raw_spin_lock_irqsave(&desc->lock, flags); | |
688 | ||
689 | chip = irq_data_get_irq_chip(&desc->irq_data); | |
b3d42232 TG |
690 | if (chip && chip->irq_cpu_online && |
691 | (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || | |
32f4125e | 692 | !irqd_irq_disabled(&desc->irq_data))) |
0fdb4b25 DD |
693 | chip->irq_cpu_online(&desc->irq_data); |
694 | ||
695 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
696 | } | |
697 | } | |
698 | ||
699 | /** | |
700 | * irq_cpu_offline - Invoke all irq_cpu_offline functions. | |
701 | * | |
702 | * Iterate through all irqs and invoke the chip.irq_cpu_offline() | |
703 | * for each. | |
704 | */ | |
705 | void irq_cpu_offline(void) | |
706 | { | |
707 | struct irq_desc *desc; | |
708 | struct irq_chip *chip; | |
709 | unsigned long flags; | |
710 | unsigned int irq; | |
711 | ||
712 | for_each_active_irq(irq) { | |
713 | desc = irq_to_desc(irq); | |
714 | if (!desc) | |
715 | continue; | |
716 | ||
717 | raw_spin_lock_irqsave(&desc->lock, flags); | |
718 | ||
719 | chip = irq_data_get_irq_chip(&desc->irq_data); | |
b3d42232 TG |
720 | if (chip && chip->irq_cpu_offline && |
721 | (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || | |
32f4125e | 722 | !irqd_irq_disabled(&desc->irq_data))) |
0fdb4b25 DD |
723 | chip->irq_cpu_offline(&desc->irq_data); |
724 | ||
725 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
726 | } | |
727 | } |