Commit | Line | Data |
---|---|---|
dd87eb3a TG |
1 | /* |
2 | * linux/kernel/irq/chip.c | |
3 | * | |
4 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar | |
5 | * Copyright (C) 2005-2006, Thomas Gleixner, Russell King | |
6 | * | |
7 | * This file contains the core interrupt handling code, for irq-chip | |
8 | * based architectures. | |
9 | * | |
10 | * Detailed information is available in Documentation/DocBook/genericirq | |
11 | */ | |
12 | ||
13 | #include <linux/irq.h> | |
7fe3730d | 14 | #include <linux/msi.h> |
dd87eb3a TG |
15 | #include <linux/module.h> |
16 | #include <linux/interrupt.h> | |
17 | #include <linux/kernel_stat.h> | |
18 | ||
f069686e SR |
19 | #include <trace/events/irq.h> |
20 | ||
dd87eb3a TG |
21 | #include "internals.h" |
22 | ||
/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 *
 *	Installs @chip as the irq chip for @irq; a NULL @chip installs the
 *	dummy no_irq_chip instead. Returns 0 on success or -EINVAL when no
 *	descriptor exists for @irq.
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	/* Locked descriptor or NULL; must be released via irq_put_desc_unlock() */
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
dd87eb3a TG |
50 | |
/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 *	Returns 0 on success, -EINVAL when no descriptor exists for @irq,
 *	or the error code returned by __irq_set_trigger().
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	/* Takes the chip bus lock too: trigger changes may reach a slow bus */
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Strip everything except the trigger mode bits */
	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
dd87eb3a TG |
71 | |
/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq. Returns 0 on
 *	success or -EINVAL when no descriptor exists for @irq.
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);
dd87eb3a | 91 | |
/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq. Returns 0 on success or
 *	-EINVAL when no descriptor exists for @irq.
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	/* Keep the MSI entry's back-pointer to its irq number in sync */
	if (entry)
		entry->irq = irq;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
112 | ||
/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq. Returns 0 on success
 *	or -EINVAL when no descriptor exists for @irq.
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
dd87eb3a | 132 | |
f303a6dd TG |
133 | struct irq_data *irq_get_irq_data(unsigned int irq) |
134 | { | |
135 | struct irq_desc *desc = irq_to_desc(irq); | |
136 | ||
137 | return desc ? &desc->irq_data : NULL; | |
138 | } | |
139 | EXPORT_SYMBOL_GPL(irq_get_irq_data); | |
140 | ||
/* Clear the DISABLED state bit on the descriptor's irq_data */
static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}
145 | ||
/* Set the DISABLED state bit on the descriptor's irq_data */
static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}
150 | ||
/* Clear the MASKED state bit on the descriptor's irq_data */
static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}
155 | ||
/* Set the MASKED state bit on the descriptor's irq_data */
static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}
160 | ||
/*
 * Start up an interrupt: clear the disabled state, reset the disable
 * depth and unmask the line. Prefers the chip's irq_startup() callback
 * (its return value is passed through); otherwise falls back to
 * irq_enable(). When @resend is true, check_irq_resend() is invoked to
 * replay an interrupt that is pending in software.
 */
int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		/* irq_startup() is expected to leave the line unmasked */
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}
178 | ||
/*
 * Shut down an interrupt: mark it disabled at depth 1 and silence the
 * line via the best available chip callback, in preference order
 * irq_shutdown() -> irq_disable() -> irq_mask() (irq_mask is assumed
 * present as the last resort). The MASKED state is recorded
 * unconditionally afterwards.
 */
void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}
191 | ||
/*
 * Enable an interrupt: clear the disabled and masked state and unmask
 * the line, preferring the chip's irq_enable() callback over plain
 * irq_unmask().
 */
void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}
201 | ||
/*
 * Disable an interrupt: record the DISABLED state. The hardware is only
 * touched (and MASKED recorded) when the chip provides an irq_disable()
 * callback; otherwise the line stays unmasked here.
 * NOTE(review): leaving the line unmasked looks like intentional lazy
 * disable handled elsewhere in the flow handlers - confirm before
 * changing.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}
210 | ||
/*
 * Enable a per-CPU interrupt on @cpu: unmask via irq_enable() or
 * irq_unmask() and record @cpu in the descriptor's percpu_enabled mask.
 */
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}
219 | ||
/*
 * Disable a per-CPU interrupt on @cpu: mask via irq_disable() or
 * irq_mask() and remove @cpu from the descriptor's percpu_enabled mask.
 */
void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}
228 | ||
/*
 * Mask and acknowledge an interrupt in one go. Uses the chip's combined
 * irq_mask_ack() when available, otherwise irq_mask() followed by an
 * optional irq_ack(). Records the MASKED state either way.
 */
static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}
240 | ||
/*
 * Mask an interrupt line. No-op (and no state change) when the chip has
 * no irq_mask() callback.
 */
void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}
248 | ||
/*
 * Unmask an interrupt line. No-op (and no state change) when the chip
 * has no irq_unmask() callback.
 */
void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}
256 | ||
/*
 * handle_nested_irq - Handle a nested irq from a irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * threads context. May sleep, so must only be called from thread
 * context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	/* No handler installed or irq disabled: remember it as pending */
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	/* Drop the lock while the (possibly sleeping) handler runs */
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
298 | ||
fe200ae4 TG |
299 | static bool irq_check_poll(struct irq_desc *desc) |
300 | { | |
6954b75b | 301 | if (!(desc->istate & IRQS_POLL_INPROGRESS)) |
fe200ae4 TG |
302 | return false; |
303 | return irq_wait_for_poll(desc); | |
304 | } | |
305 | ||
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	/* Already in progress: let the spurious-irq poll machinery decide */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/* No handler or disabled: record it as pending and bail out */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
dd87eb3a | 341 | |
/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}
359 | ||
/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	/* Mask and ack first so the level line cannot re-trigger */
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	/* Unmask again unless a oneshot thread still owns the line */
	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
dd87eb3a | 400 | |
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
/* Invoke the optional per-descriptor preflow hook before the event runs */
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
410 | ||
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	/* Oneshot: keep the line masked until the thread has finished */
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	if (desc->istate & IRQS_ONESHOT)
		cond_unmask_irq(desc);

out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return;
out:
	/* Unhandled exit: chips may ask to skip the EOI in that case */
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}
462 | ||
/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occures on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq: ack so the edge latch can fire again */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		/* Action can vanish while the lock was dropped in the event */
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Renable it, if it was not disabled in meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
dd87eb3a | 529 | |
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar as the above handle_edge_irq, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	/* EOI is issued on every exit path, handled or not */
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif
574 | ||
/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements.
 *	Note: desc->lock is never taken - the irq is local to this CPU.
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
597 | ||
/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	/* Resolve this CPU's private dev_id from the percpu pointer */
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
629 | ||
/*
 * Install @handle as the flow handler for @irq and set its @name.
 * A NULL @handle uninstalls: handle_bad_irq is installed and the irq is
 * masked and marked disabled. Installing a real handler on an irq that
 * still has no_irq_chip is refused (WARN_ON). When @is_chained is set,
 * the irq is marked noprobe/norequest/nothread and started immediately,
 * since a chained handler is invoked by its demultiplexing parent, not
 * via request_irq().
 */
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
dd87eb3a TG |
667 | |
668 | void | |
3836ca08 | 669 | irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, |
a460e745 | 670 | irq_flow_handler_t handle, const char *name) |
dd87eb3a | 671 | { |
35e857cb | 672 | irq_set_chip(irq, chip); |
3836ca08 | 673 | __irq_set_handler(irq, handle, 0, name); |
dd87eb3a | 674 | } |
b3ae66f2 | 675 | EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name); |
46f4f8f6 | 676 | |
/*
 * Clear the bits in @clr and set the bits in @set in the irq settings
 * of @irq, then rebuild the derived IRQD_* bits (NO_BALANCING, PER_CPU,
 * MOVE_PCNTXT, LEVEL and the trigger mask) in irq_data from the updated
 * settings so both views stay consistent.
 */
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	/* Wipe the derived bits, then re-derive them from the settings */
	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
0fdb4b25 DD |
702 | |
/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/*
		 * Chips with IRQCHIP_ONOFFLINE_ENABLED only get the
		 * callback while the irq is not disabled.
		 */
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
732 | ||
/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/*
		 * Chips with IRQCHIP_ONOFFLINE_ENABLED only get the
		 * callback while the irq is not disabled.
		 */
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}