Commit | Line | Data |
---|---|---|
dd87eb3a TG |
1 | /* |
2 | * linux/kernel/irq/chip.c | |
3 | * | |
4 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar | |
5 | * Copyright (C) 2005-2006, Thomas Gleixner, Russell King | |
6 | * | |
7 | * This file contains the core interrupt handling code, for irq-chip | |
8 | * based architectures. | |
9 | * | |
10 | * Detailed information is available in Documentation/DocBook/genericirq | |
11 | */ | |
12 | ||
13 | #include <linux/irq.h> | |
7fe3730d | 14 | #include <linux/msi.h> |
dd87eb3a TG |
15 | #include <linux/module.h> |
16 | #include <linux/interrupt.h> | |
17 | #include <linux/kernel_stat.h> | |
f8264e34 | 18 | #include <linux/irqdomain.h> |
dd87eb3a | 19 | |
f069686e SR |
20 | #include <trace/events/irq.h> |
21 | ||
dd87eb3a TG |
22 | #include "internals.h" |
23 | ||
24 | /** | |
a0cd9ca2 | 25 | * irq_set_chip - set the irq chip for an irq |
dd87eb3a TG |
26 | * @irq: irq number |
27 | * @chip: pointer to irq chip description structure | |
28 | */ | |
a0cd9ca2 | 29 | int irq_set_chip(unsigned int irq, struct irq_chip *chip) |
dd87eb3a | 30 | { |
dd87eb3a | 31 | unsigned long flags; |
31d9d9b6 | 32 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
dd87eb3a | 33 | |
02725e74 | 34 | if (!desc) |
dd87eb3a | 35 | return -EINVAL; |
dd87eb3a TG |
36 | |
37 | if (!chip) | |
38 | chip = &no_irq_chip; | |
39 | ||
6b8ff312 | 40 | desc->irq_data.chip = chip; |
02725e74 | 41 | irq_put_desc_unlock(desc, flags); |
d72274e5 DD |
42 | /* |
43 | * For !CONFIG_SPARSE_IRQ make the irq show up in | |
f63b6a05 | 44 | * allocated_irqs. |
d72274e5 | 45 | */ |
f63b6a05 | 46 | irq_mark_irq(irq); |
dd87eb3a TG |
47 | return 0; |
48 | } | |
a0cd9ca2 | 49 | EXPORT_SYMBOL(irq_set_chip); |
dd87eb3a TG |
50 | |
51 | /** | |
a0cd9ca2 | 52 | * irq_set_type - set the irq trigger type for an irq |
dd87eb3a | 53 | * @irq: irq number |
0c5d1eb7 | 54 | * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h |
dd87eb3a | 55 | */ |
a0cd9ca2 | 56 | int irq_set_irq_type(unsigned int irq, unsigned int type) |
dd87eb3a | 57 | { |
dd87eb3a | 58 | unsigned long flags; |
31d9d9b6 | 59 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
02725e74 | 60 | int ret = 0; |
dd87eb3a | 61 | |
02725e74 TG |
62 | if (!desc) |
63 | return -EINVAL; | |
dd87eb3a | 64 | |
f2b662da | 65 | type &= IRQ_TYPE_SENSE_MASK; |
a09b659c | 66 | ret = __irq_set_trigger(desc, irq, type); |
02725e74 | 67 | irq_put_desc_busunlock(desc, flags); |
dd87eb3a TG |
68 | return ret; |
69 | } | |
a0cd9ca2 | 70 | EXPORT_SYMBOL(irq_set_irq_type); |
dd87eb3a TG |
71 | |
72 | /** | |
a0cd9ca2 | 73 | * irq_set_handler_data - set irq handler data for an irq |
dd87eb3a TG |
74 | * @irq: Interrupt number |
75 | * @data: Pointer to interrupt specific data | |
76 | * | |
77 | * Set the hardware irq controller data for an irq | |
78 | */ | |
a0cd9ca2 | 79 | int irq_set_handler_data(unsigned int irq, void *data) |
dd87eb3a | 80 | { |
dd87eb3a | 81 | unsigned long flags; |
31d9d9b6 | 82 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
dd87eb3a | 83 | |
02725e74 | 84 | if (!desc) |
dd87eb3a | 85 | return -EINVAL; |
6b8ff312 | 86 | desc->irq_data.handler_data = data; |
02725e74 | 87 | irq_put_desc_unlock(desc, flags); |
dd87eb3a TG |
88 | return 0; |
89 | } | |
a0cd9ca2 | 90 | EXPORT_SYMBOL(irq_set_handler_data); |
dd87eb3a | 91 | |
5b912c10 | 92 | /** |
51906e77 AG |
93 | * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset |
94 | * @irq_base: Interrupt number base | |
95 | * @irq_offset: Interrupt number offset | |
96 | * @entry: Pointer to MSI descriptor data | |
5b912c10 | 97 | * |
51906e77 | 98 | * Set the MSI descriptor entry for an irq at offset |
5b912c10 | 99 | */ |
51906e77 AG |
100 | int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, |
101 | struct msi_desc *entry) | |
5b912c10 | 102 | { |
5b912c10 | 103 | unsigned long flags; |
51906e77 | 104 | struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
5b912c10 | 105 | |
02725e74 | 106 | if (!desc) |
5b912c10 | 107 | return -EINVAL; |
6b8ff312 | 108 | desc->irq_data.msi_desc = entry; |
51906e77 AG |
109 | if (entry && !irq_offset) |
110 | entry->irq = irq_base; | |
02725e74 | 111 | irq_put_desc_unlock(desc, flags); |
5b912c10 EB |
112 | return 0; |
113 | } | |
114 | ||
/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	/* Convenience wrapper: offset zero into the irq range. */
	return irq_set_msi_desc_off(irq, 0, entry);
}
126 | ||
dd87eb3a | 127 | /** |
a0cd9ca2 | 128 | * irq_set_chip_data - set irq chip data for an irq |
dd87eb3a TG |
129 | * @irq: Interrupt number |
130 | * @data: Pointer to chip specific data | |
131 | * | |
132 | * Set the hardware irq chip data for an irq | |
133 | */ | |
a0cd9ca2 | 134 | int irq_set_chip_data(unsigned int irq, void *data) |
dd87eb3a | 135 | { |
dd87eb3a | 136 | unsigned long flags; |
31d9d9b6 | 137 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
dd87eb3a | 138 | |
02725e74 | 139 | if (!desc) |
dd87eb3a | 140 | return -EINVAL; |
6b8ff312 | 141 | desc->irq_data.chip_data = data; |
02725e74 | 142 | irq_put_desc_unlock(desc, flags); |
dd87eb3a TG |
143 | return 0; |
144 | } | |
a0cd9ca2 | 145 | EXPORT_SYMBOL(irq_set_chip_data); |
dd87eb3a | 146 | |
f303a6dd TG |
147 | struct irq_data *irq_get_irq_data(unsigned int irq) |
148 | { | |
149 | struct irq_desc *desc = irq_to_desc(irq); | |
150 | ||
151 | return desc ? &desc->irq_data : NULL; | |
152 | } | |
153 | EXPORT_SYMBOL_GPL(irq_get_irq_data); | |
154 | ||
c1594b77 TG |
155 | static void irq_state_clr_disabled(struct irq_desc *desc) |
156 | { | |
801a0e9a | 157 | irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); |
c1594b77 TG |
158 | } |
159 | ||
160 | static void irq_state_set_disabled(struct irq_desc *desc) | |
161 | { | |
801a0e9a | 162 | irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); |
c1594b77 TG |
163 | } |
164 | ||
6e40262e TG |
165 | static void irq_state_clr_masked(struct irq_desc *desc) |
166 | { | |
32f4125e | 167 | irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); |
6e40262e TG |
168 | } |
169 | ||
170 | static void irq_state_set_masked(struct irq_desc *desc) | |
171 | { | |
32f4125e | 172 | irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); |
6e40262e TG |
173 | } |
174 | ||
b4bc724e | 175 | int irq_startup(struct irq_desc *desc, bool resend) |
46999238 | 176 | { |
b4bc724e TG |
177 | int ret = 0; |
178 | ||
c1594b77 | 179 | irq_state_clr_disabled(desc); |
46999238 TG |
180 | desc->depth = 0; |
181 | ||
f8264e34 | 182 | irq_domain_activate_irq(&desc->irq_data); |
3aae994f | 183 | if (desc->irq_data.chip->irq_startup) { |
b4bc724e | 184 | ret = desc->irq_data.chip->irq_startup(&desc->irq_data); |
6e40262e | 185 | irq_state_clr_masked(desc); |
b4bc724e TG |
186 | } else { |
187 | irq_enable(desc); | |
3aae994f | 188 | } |
b4bc724e TG |
189 | if (resend) |
190 | check_irq_resend(desc, desc->irq_data.irq); | |
191 | return ret; | |
46999238 TG |
192 | } |
193 | ||
194 | void irq_shutdown(struct irq_desc *desc) | |
195 | { | |
c1594b77 | 196 | irq_state_set_disabled(desc); |
46999238 | 197 | desc->depth = 1; |
50f7c032 TG |
198 | if (desc->irq_data.chip->irq_shutdown) |
199 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); | |
ed585a65 | 200 | else if (desc->irq_data.chip->irq_disable) |
50f7c032 TG |
201 | desc->irq_data.chip->irq_disable(&desc->irq_data); |
202 | else | |
203 | desc->irq_data.chip->irq_mask(&desc->irq_data); | |
f8264e34 | 204 | irq_domain_deactivate_irq(&desc->irq_data); |
6e40262e | 205 | irq_state_set_masked(desc); |
46999238 TG |
206 | } |
207 | ||
87923470 TG |
208 | void irq_enable(struct irq_desc *desc) |
209 | { | |
c1594b77 | 210 | irq_state_clr_disabled(desc); |
50f7c032 TG |
211 | if (desc->irq_data.chip->irq_enable) |
212 | desc->irq_data.chip->irq_enable(&desc->irq_data); | |
213 | else | |
214 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
6e40262e | 215 | irq_state_clr_masked(desc); |
dd87eb3a TG |
216 | } |
217 | ||
d671a605 | 218 | /** |
f788e7bf | 219 | * irq_disable - Mark interrupt disabled |
d671a605 AF |
220 | * @desc: irq descriptor which should be disabled |
221 | * | |
222 | * If the chip does not implement the irq_disable callback, we | |
223 | * use a lazy disable approach. That means we mark the interrupt | |
224 | * disabled, but leave the hardware unmasked. That's an | |
225 | * optimization because we avoid the hardware access for the | |
226 | * common case where no interrupt happens after we marked it | |
227 | * disabled. If an interrupt happens, then the interrupt flow | |
228 | * handler masks the line at the hardware level and marks it | |
229 | * pending. | |
230 | */ | |
50f7c032 | 231 | void irq_disable(struct irq_desc *desc) |
89d694b9 | 232 | { |
c1594b77 | 233 | irq_state_set_disabled(desc); |
50f7c032 TG |
234 | if (desc->irq_data.chip->irq_disable) { |
235 | desc->irq_data.chip->irq_disable(&desc->irq_data); | |
a61d8258 | 236 | irq_state_set_masked(desc); |
50f7c032 | 237 | } |
89d694b9 TG |
238 | } |
239 | ||
31d9d9b6 MZ |
240 | void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) |
241 | { | |
242 | if (desc->irq_data.chip->irq_enable) | |
243 | desc->irq_data.chip->irq_enable(&desc->irq_data); | |
244 | else | |
245 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
246 | cpumask_set_cpu(cpu, desc->percpu_enabled); | |
247 | } | |
248 | ||
249 | void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu) | |
250 | { | |
251 | if (desc->irq_data.chip->irq_disable) | |
252 | desc->irq_data.chip->irq_disable(&desc->irq_data); | |
253 | else | |
254 | desc->irq_data.chip->irq_mask(&desc->irq_data); | |
255 | cpumask_clear_cpu(cpu, desc->percpu_enabled); | |
256 | } | |
257 | ||
9205e31d | 258 | static inline void mask_ack_irq(struct irq_desc *desc) |
dd87eb3a | 259 | { |
9205e31d TG |
260 | if (desc->irq_data.chip->irq_mask_ack) |
261 | desc->irq_data.chip->irq_mask_ack(&desc->irq_data); | |
dd87eb3a | 262 | else { |
e2c0f8ff | 263 | desc->irq_data.chip->irq_mask(&desc->irq_data); |
22a49163 TG |
264 | if (desc->irq_data.chip->irq_ack) |
265 | desc->irq_data.chip->irq_ack(&desc->irq_data); | |
dd87eb3a | 266 | } |
6e40262e | 267 | irq_state_set_masked(desc); |
0b1adaa0 TG |
268 | } |
269 | ||
d4d5e089 | 270 | void mask_irq(struct irq_desc *desc) |
0b1adaa0 | 271 | { |
e2c0f8ff TG |
272 | if (desc->irq_data.chip->irq_mask) { |
273 | desc->irq_data.chip->irq_mask(&desc->irq_data); | |
6e40262e | 274 | irq_state_set_masked(desc); |
0b1adaa0 TG |
275 | } |
276 | } | |
277 | ||
d4d5e089 | 278 | void unmask_irq(struct irq_desc *desc) |
0b1adaa0 | 279 | { |
0eda58b7 TG |
280 | if (desc->irq_data.chip->irq_unmask) { |
281 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
6e40262e | 282 | irq_state_clr_masked(desc); |
0b1adaa0 | 283 | } |
dd87eb3a TG |
284 | } |
285 | ||
328a4978 TG |
286 | void unmask_threaded_irq(struct irq_desc *desc) |
287 | { | |
288 | struct irq_chip *chip = desc->irq_data.chip; | |
289 | ||
290 | if (chip->flags & IRQCHIP_EOI_THREADED) | |
291 | chip->irq_eoi(&desc->irq_data); | |
292 | ||
293 | if (chip->irq_unmask) { | |
294 | chip->irq_unmask(&desc->irq_data); | |
295 | irq_state_clr_masked(desc); | |
296 | } | |
297 | } | |
298 | ||
399b5da2 TG |
299 | /* |
300 | * handle_nested_irq - Handle a nested irq from a irq thread | |
301 | * @irq: the interrupt number | |
302 | * | |
303 | * Handle interrupts which are nested into a threaded interrupt | |
304 | * handler. The handler function is called inside the calling | |
305 | * threads context. | |
306 | */ | |
307 | void handle_nested_irq(unsigned int irq) | |
308 | { | |
309 | struct irq_desc *desc = irq_to_desc(irq); | |
310 | struct irqaction *action; | |
311 | irqreturn_t action_ret; | |
312 | ||
313 | might_sleep(); | |
314 | ||
239007b8 | 315 | raw_spin_lock_irq(&desc->lock); |
399b5da2 | 316 | |
293a7a0a | 317 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
399b5da2 TG |
318 | kstat_incr_irqs_this_cpu(irq, desc); |
319 | ||
320 | action = desc->action; | |
23812b9d NJ |
321 | if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { |
322 | desc->istate |= IRQS_PENDING; | |
399b5da2 | 323 | goto out_unlock; |
23812b9d | 324 | } |
399b5da2 | 325 | |
32f4125e | 326 | irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
239007b8 | 327 | raw_spin_unlock_irq(&desc->lock); |
399b5da2 TG |
328 | |
329 | action_ret = action->thread_fn(action->irq, action->dev_id); | |
330 | if (!noirqdebug) | |
331 | note_interrupt(irq, desc, action_ret); | |
332 | ||
239007b8 | 333 | raw_spin_lock_irq(&desc->lock); |
32f4125e | 334 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
399b5da2 TG |
335 | |
336 | out_unlock: | |
239007b8 | 337 | raw_spin_unlock_irq(&desc->lock); |
399b5da2 TG |
338 | } |
339 | EXPORT_SYMBOL_GPL(handle_nested_irq); | |
340 | ||
fe200ae4 TG |
341 | static bool irq_check_poll(struct irq_desc *desc) |
342 | { | |
6954b75b | 343 | if (!(desc->istate & IRQS_POLL_INPROGRESS)) |
fe200ae4 TG |
344 | return false; |
345 | return irq_wait_for_poll(desc); | |
346 | } | |
347 | ||
c7bd3ec0 TG |
348 | static bool irq_may_run(struct irq_desc *desc) |
349 | { | |
9ce7a258 TG |
350 | unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED; |
351 | ||
352 | /* | |
353 | * If the interrupt is not in progress and is not an armed | |
354 | * wakeup interrupt, proceed. | |
355 | */ | |
356 | if (!irqd_has_set(&desc->irq_data, mask)) | |
c7bd3ec0 | 357 | return true; |
9ce7a258 TG |
358 | |
359 | /* | |
360 | * If the interrupt is an armed wakeup source, mark it pending | |
361 | * and suspended, disable it and notify the pm core about the | |
362 | * event. | |
363 | */ | |
364 | if (irq_pm_check_wakeup(desc)) | |
365 | return false; | |
366 | ||
367 | /* | |
368 | * Handle a potential concurrent poll on a different core. | |
369 | */ | |
c7bd3ec0 TG |
370 | return irq_check_poll(desc); |
371 | } | |
372 | ||
dd87eb3a TG |
373 | /** |
374 | * handle_simple_irq - Simple and software-decoded IRQs. | |
375 | * @irq: the interrupt number | |
376 | * @desc: the interrupt description structure for this irq | |
dd87eb3a TG |
377 | * |
378 | * Simple interrupts are either sent from a demultiplexing interrupt | |
379 | * handler or come from hardware, where no interrupt hardware control | |
380 | * is necessary. | |
381 | * | |
382 | * Note: The caller is expected to handle the ack, clear, mask and | |
383 | * unmask issues if necessary. | |
384 | */ | |
7ad5b3a5 | 385 | void |
7d12e780 | 386 | handle_simple_irq(unsigned int irq, struct irq_desc *desc) |
dd87eb3a | 387 | { |
239007b8 | 388 | raw_spin_lock(&desc->lock); |
dd87eb3a | 389 | |
c7bd3ec0 TG |
390 | if (!irq_may_run(desc)) |
391 | goto out_unlock; | |
fe200ae4 | 392 | |
163ef309 | 393 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
d6c88a50 | 394 | kstat_incr_irqs_this_cpu(irq, desc); |
dd87eb3a | 395 | |
23812b9d NJ |
396 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
397 | desc->istate |= IRQS_PENDING; | |
dd87eb3a | 398 | goto out_unlock; |
23812b9d | 399 | } |
dd87eb3a | 400 | |
107781e7 | 401 | handle_irq_event(desc); |
dd87eb3a | 402 | |
dd87eb3a | 403 | out_unlock: |
239007b8 | 404 | raw_spin_unlock(&desc->lock); |
dd87eb3a | 405 | } |
edf76f83 | 406 | EXPORT_SYMBOL_GPL(handle_simple_irq); |
dd87eb3a | 407 | |
ac563761 TG |
408 | /* |
409 | * Called unconditionally from handle_level_irq() and only for oneshot | |
410 | * interrupts from handle_fasteoi_irq() | |
411 | */ | |
412 | static void cond_unmask_irq(struct irq_desc *desc) | |
413 | { | |
414 | /* | |
415 | * We need to unmask in the following cases: | |
416 | * - Standard level irq (IRQF_ONESHOT is not set) | |
417 | * - Oneshot irq which did not wake the thread (caused by a | |
418 | * spurious interrupt or a primary handler handling it | |
419 | * completely). | |
420 | */ | |
421 | if (!irqd_irq_disabled(&desc->irq_data) && | |
422 | irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) | |
423 | unmask_irq(desc); | |
424 | } | |
425 | ||
dd87eb3a TG |
426 | /** |
427 | * handle_level_irq - Level type irq handler | |
428 | * @irq: the interrupt number | |
429 | * @desc: the interrupt description structure for this irq | |
dd87eb3a TG |
430 | * |
431 | * Level type interrupts are active as long as the hardware line has | |
432 | * the active level. This may require to mask the interrupt and unmask | |
433 | * it after the associated handler has acknowledged the device, so the | |
434 | * interrupt line is back to inactive. | |
435 | */ | |
7ad5b3a5 | 436 | void |
7d12e780 | 437 | handle_level_irq(unsigned int irq, struct irq_desc *desc) |
dd87eb3a | 438 | { |
239007b8 | 439 | raw_spin_lock(&desc->lock); |
9205e31d | 440 | mask_ack_irq(desc); |
dd87eb3a | 441 | |
c7bd3ec0 TG |
442 | if (!irq_may_run(desc)) |
443 | goto out_unlock; | |
fe200ae4 | 444 | |
163ef309 | 445 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
d6c88a50 | 446 | kstat_incr_irqs_this_cpu(irq, desc); |
dd87eb3a TG |
447 | |
448 | /* | |
449 | * If its disabled or no action available | |
450 | * keep it masked and get out of here | |
451 | */ | |
d4dc0f90 TG |
452 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
453 | desc->istate |= IRQS_PENDING; | |
86998aa6 | 454 | goto out_unlock; |
d4dc0f90 | 455 | } |
dd87eb3a | 456 | |
1529866c | 457 | handle_irq_event(desc); |
b25c340c | 458 | |
ac563761 TG |
459 | cond_unmask_irq(desc); |
460 | ||
86998aa6 | 461 | out_unlock: |
239007b8 | 462 | raw_spin_unlock(&desc->lock); |
dd87eb3a | 463 | } |
14819ea1 | 464 | EXPORT_SYMBOL_GPL(handle_level_irq); |
dd87eb3a | 465 | |
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
/* Run the architecture's optional preflow hook before the event handler. */
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
475 | ||
328a4978 TG |
476 | static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) |
477 | { | |
478 | if (!(desc->istate & IRQS_ONESHOT)) { | |
479 | chip->irq_eoi(&desc->irq_data); | |
480 | return; | |
481 | } | |
482 | /* | |
483 | * We need to unmask in the following cases: | |
484 | * - Oneshot irq which did not wake the thread (caused by a | |
485 | * spurious interrupt or a primary handler handling it | |
486 | * completely). | |
487 | */ | |
488 | if (!irqd_irq_disabled(&desc->irq_data) && | |
489 | irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) { | |
490 | chip->irq_eoi(&desc->irq_data); | |
491 | unmask_irq(desc); | |
492 | } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) { | |
493 | chip->irq_eoi(&desc->irq_data); | |
494 | } | |
495 | } | |
496 | ||
dd87eb3a | 497 | /** |
47c2a3aa | 498 | * handle_fasteoi_irq - irq handler for transparent controllers |
dd87eb3a TG |
499 | * @irq: the interrupt number |
500 | * @desc: the interrupt description structure for this irq | |
dd87eb3a | 501 | * |
47c2a3aa | 502 | * Only a single callback will be issued to the chip: an ->eoi() |
dd87eb3a TG |
503 | * call when the interrupt has been serviced. This enables support |
504 | * for modern forms of interrupt handlers, which handle the flow | |
505 | * details in hardware, transparently. | |
506 | */ | |
7ad5b3a5 | 507 | void |
7d12e780 | 508 | handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) |
dd87eb3a | 509 | { |
328a4978 TG |
510 | struct irq_chip *chip = desc->irq_data.chip; |
511 | ||
239007b8 | 512 | raw_spin_lock(&desc->lock); |
dd87eb3a | 513 | |
c7bd3ec0 TG |
514 | if (!irq_may_run(desc)) |
515 | goto out; | |
dd87eb3a | 516 | |
163ef309 | 517 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
d6c88a50 | 518 | kstat_incr_irqs_this_cpu(irq, desc); |
dd87eb3a TG |
519 | |
520 | /* | |
521 | * If its disabled or no action available | |
76d21601 | 522 | * then mask it and get out of here: |
dd87eb3a | 523 | */ |
32f4125e | 524 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
2a0d6fb3 | 525 | desc->istate |= IRQS_PENDING; |
e2c0f8ff | 526 | mask_irq(desc); |
dd87eb3a | 527 | goto out; |
98bb244b | 528 | } |
c69e3758 TG |
529 | |
530 | if (desc->istate & IRQS_ONESHOT) | |
531 | mask_irq(desc); | |
532 | ||
78129576 | 533 | preflow_handler(desc); |
a7ae4de5 | 534 | handle_irq_event(desc); |
77694b40 | 535 | |
328a4978 | 536 | cond_unmask_eoi_irq(desc, chip); |
ac563761 | 537 | |
239007b8 | 538 | raw_spin_unlock(&desc->lock); |
77694b40 TG |
539 | return; |
540 | out: | |
328a4978 TG |
541 | if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) |
542 | chip->irq_eoi(&desc->irq_data); | |
543 | raw_spin_unlock(&desc->lock); | |
dd87eb3a | 544 | } |
7cad45ee | 545 | EXPORT_SYMBOL_GPL(handle_fasteoi_irq); |
dd87eb3a TG |
546 | |
547 | /** | |
548 | * handle_edge_irq - edge type IRQ handler | |
549 | * @irq: the interrupt number | |
550 | * @desc: the interrupt description structure for this irq | |
dd87eb3a TG |
551 | * |
552 | * Interrupt occures on the falling and/or rising edge of a hardware | |
25985edc | 553 | * signal. The occurrence is latched into the irq controller hardware |
dd87eb3a TG |
554 | * and must be acked in order to be reenabled. After the ack another |
555 | * interrupt can happen on the same source even before the first one | |
dfff0615 | 556 | * is handled by the associated event handler. If this happens it |
dd87eb3a TG |
557 | * might be necessary to disable (mask) the interrupt depending on the |
558 | * controller hardware. This requires to reenable the interrupt inside | |
559 | * of the loop which handles the interrupts which have arrived while | |
560 | * the handler was running. If all pending interrupts are handled, the | |
561 | * loop is left. | |
562 | */ | |
7ad5b3a5 | 563 | void |
7d12e780 | 564 | handle_edge_irq(unsigned int irq, struct irq_desc *desc) |
dd87eb3a | 565 | { |
239007b8 | 566 | raw_spin_lock(&desc->lock); |
dd87eb3a | 567 | |
163ef309 | 568 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
c3d7acd0 | 569 | |
c7bd3ec0 TG |
570 | if (!irq_may_run(desc)) { |
571 | desc->istate |= IRQS_PENDING; | |
572 | mask_ack_irq(desc); | |
573 | goto out_unlock; | |
dd87eb3a | 574 | } |
c3d7acd0 | 575 | |
dd87eb3a | 576 | /* |
c3d7acd0 TG |
577 | * If its disabled or no action available then mask it and get |
578 | * out of here. | |
dd87eb3a | 579 | */ |
c3d7acd0 TG |
580 | if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { |
581 | desc->istate |= IRQS_PENDING; | |
582 | mask_ack_irq(desc); | |
583 | goto out_unlock; | |
dd87eb3a | 584 | } |
c3d7acd0 | 585 | |
d6c88a50 | 586 | kstat_incr_irqs_this_cpu(irq, desc); |
dd87eb3a TG |
587 | |
588 | /* Start handling the irq */ | |
22a49163 | 589 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
dd87eb3a | 590 | |
dd87eb3a | 591 | do { |
a60a5dc2 | 592 | if (unlikely(!desc->action)) { |
e2c0f8ff | 593 | mask_irq(desc); |
dd87eb3a TG |
594 | goto out_unlock; |
595 | } | |
596 | ||
597 | /* | |
598 | * When another irq arrived while we were handling | |
599 | * one, we could have masked the irq. | |
600 | * Renable it, if it was not disabled in meantime. | |
601 | */ | |
2a0d6fb3 | 602 | if (unlikely(desc->istate & IRQS_PENDING)) { |
32f4125e TG |
603 | if (!irqd_irq_disabled(&desc->irq_data) && |
604 | irqd_irq_masked(&desc->irq_data)) | |
c1594b77 | 605 | unmask_irq(desc); |
dd87eb3a TG |
606 | } |
607 | ||
a60a5dc2 | 608 | handle_irq_event(desc); |
dd87eb3a | 609 | |
2a0d6fb3 | 610 | } while ((desc->istate & IRQS_PENDING) && |
32f4125e | 611 | !irqd_irq_disabled(&desc->irq_data)); |
dd87eb3a | 612 | |
dd87eb3a | 613 | out_unlock: |
239007b8 | 614 | raw_spin_unlock(&desc->lock); |
dd87eb3a | 615 | } |
3911ff30 | 616 | EXPORT_SYMBOL(handle_edge_irq); |
dd87eb3a | 617 | |
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Similar as the above handle_edge_irq, but using eoi and w/o the
 *	mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If its disabled or no action available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	/* The EOI is issued on every exit path, handled or not. */
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif
665 | ||
dd87eb3a | 666 | /** |
24b26d42 | 667 | * handle_percpu_irq - Per CPU local irq handler |
dd87eb3a TG |
668 | * @irq: the interrupt number |
669 | * @desc: the interrupt description structure for this irq | |
dd87eb3a TG |
670 | * |
671 | * Per CPU interrupts on SMP machines without locking requirements | |
672 | */ | |
7ad5b3a5 | 673 | void |
7d12e780 | 674 | handle_percpu_irq(unsigned int irq, struct irq_desc *desc) |
dd87eb3a | 675 | { |
35e857cb | 676 | struct irq_chip *chip = irq_desc_get_chip(desc); |
dd87eb3a | 677 | |
d6c88a50 | 678 | kstat_incr_irqs_this_cpu(irq, desc); |
dd87eb3a | 679 | |
849f061c TG |
680 | if (chip->irq_ack) |
681 | chip->irq_ack(&desc->irq_data); | |
dd87eb3a | 682 | |
849f061c | 683 | handle_irq_event_percpu(desc, desc->action); |
dd87eb3a | 684 | |
849f061c TG |
685 | if (chip->irq_eoi) |
686 | chip->irq_eoi(&desc->irq_data); | |
dd87eb3a TG |
687 | } |
688 | ||
31d9d9b6 MZ |
689 | /** |
690 | * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids | |
691 | * @irq: the interrupt number | |
692 | * @desc: the interrupt description structure for this irq | |
693 | * | |
694 | * Per CPU interrupts on SMP machines without locking requirements. Same as | |
695 | * handle_percpu_irq() above but with the following extras: | |
696 | * | |
697 | * action->percpu_dev_id is a pointer to percpu variables which | |
698 | * contain the real device id for the cpu on which this handler is | |
699 | * called | |
700 | */ | |
701 | void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc) | |
702 | { | |
703 | struct irq_chip *chip = irq_desc_get_chip(desc); | |
704 | struct irqaction *action = desc->action; | |
532d0d06 | 705 | void *dev_id = raw_cpu_ptr(action->percpu_dev_id); |
31d9d9b6 MZ |
706 | irqreturn_t res; |
707 | ||
708 | kstat_incr_irqs_this_cpu(irq, desc); | |
709 | ||
710 | if (chip->irq_ack) | |
711 | chip->irq_ack(&desc->irq_data); | |
712 | ||
713 | trace_irq_handler_entry(irq, action); | |
714 | res = action->handler(irq, dev_id); | |
715 | trace_irq_handler_exit(irq, action, res); | |
716 | ||
717 | if (chip->irq_eoi) | |
718 | chip->irq_eoi(&desc->irq_data); | |
719 | } | |
720 | ||
dd87eb3a | 721 | void |
3b0f95be RK |
722 | __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, |
723 | int is_chained, const char *name) | |
dd87eb3a | 724 | { |
091738a2 | 725 | if (!handle) { |
dd87eb3a | 726 | handle = handle_bad_irq; |
091738a2 | 727 | } else { |
f86eff22 MZ |
728 | struct irq_data *irq_data = &desc->irq_data; |
729 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | |
730 | /* | |
731 | * With hierarchical domains we might run into a | |
732 | * situation where the outermost chip is not yet set | |
733 | * up, but the inner chips are there. Instead of | |
734 | * bailing we install the handler, but obviously we | |
735 | * cannot enable/startup the interrupt at this point. | |
736 | */ | |
737 | while (irq_data) { | |
738 | if (irq_data->chip != &no_irq_chip) | |
739 | break; | |
740 | /* | |
741 | * Bail out if the outer chip is not set up | |
742 | * and the interrrupt supposed to be started | |
743 | * right away. | |
744 | */ | |
745 | if (WARN_ON(is_chained)) | |
3b0f95be | 746 | return; |
f86eff22 MZ |
747 | /* Try the parent */ |
748 | irq_data = irq_data->parent_data; | |
749 | } | |
750 | #endif | |
751 | if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip)) | |
3b0f95be | 752 | return; |
f8b5473f | 753 | } |
dd87eb3a | 754 | |
dd87eb3a TG |
755 | /* Uninstall? */ |
756 | if (handle == handle_bad_irq) { | |
6b8ff312 | 757 | if (desc->irq_data.chip != &no_irq_chip) |
9205e31d | 758 | mask_ack_irq(desc); |
801a0e9a | 759 | irq_state_set_disabled(desc); |
dd87eb3a TG |
760 | desc->depth = 1; |
761 | } | |
762 | desc->handle_irq = handle; | |
a460e745 | 763 | desc->name = name; |
dd87eb3a TG |
764 | |
765 | if (handle != handle_bad_irq && is_chained) { | |
1ccb4e61 TG |
766 | irq_settings_set_noprobe(desc); |
767 | irq_settings_set_norequest(desc); | |
7f1b1244 | 768 | irq_settings_set_nothread(desc); |
b4bc724e | 769 | irq_startup(desc, true); |
dd87eb3a | 770 | } |
3b0f95be RK |
771 | } |
772 | ||
773 | void | |
774 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |
775 | const char *name) | |
776 | { | |
777 | unsigned long flags; | |
778 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); | |
779 | ||
780 | if (!desc) | |
781 | return; | |
782 | ||
783 | __irq_do_set_handler(desc, handle, is_chained, name); | |
02725e74 | 784 | irq_put_desc_busunlock(desc, flags); |
dd87eb3a | 785 | } |
3836ca08 | 786 | EXPORT_SYMBOL_GPL(__irq_set_handler); |
dd87eb3a | 787 | |
3b0f95be RK |
788 | void |
789 | irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, | |
790 | void *data) | |
791 | { | |
792 | unsigned long flags; | |
793 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); | |
794 | ||
795 | if (!desc) | |
796 | return; | |
797 | ||
798 | __irq_do_set_handler(desc, handle, 1, NULL); | |
799 | desc->irq_data.handler_data = data; | |
800 | ||
801 | irq_put_desc_busunlock(desc, flags); | |
802 | } | |
803 | EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data); | |
804 | ||
dd87eb3a | 805 | void |
3836ca08 | 806 | irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, |
a460e745 | 807 | irq_flow_handler_t handle, const char *name) |
dd87eb3a | 808 | { |
35e857cb | 809 | irq_set_chip(irq, chip); |
3836ca08 | 810 | __irq_set_handler(irq, handle, 0, name); |
dd87eb3a | 811 | } |
b3ae66f2 | 812 | EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name); |
46f4f8f6 | 813 | |
/**
 * irq_modify_status - Modify the status flags of an interrupt
 * @irq:	irq number
 * @clr:	flags to clear (IRQ_* settings bits)
 * @set:	flags to set (IRQ_* settings bits)
 *
 * Updates the descriptor's settings under the descriptor lock and then
 * re-derives the matching IRQD_* state bits in irq_data from the new
 * settings.
 */
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	irq_settings_clr_and_set(desc, clr, set);

	/*
	 * Wipe all derivable IRQD_* bits first, then set again only those
	 * which the updated settings call for.
	 */
	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);

	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	/* Mirror the trigger type bits into the irq_data state as well. */
	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
0fdb4b25 DD |
839 | |
/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		/* The callback runs with the descriptor lock held. */
		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/*
		 * Chips flagged IRQCHIP_ONOFFLINE_ENABLED get the callback
		 * only while the interrupt is not disabled; all other chips
		 * get it unconditionally (when they implement it).
		 */
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
869 | ||
/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		/* The callback runs with the descriptor lock held. */
		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/*
		 * Same gating as irq_cpu_online(): IRQCHIP_ONOFFLINE_ENABLED
		 * chips are skipped while the interrupt is disabled.
		 */
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
85f08c17 JL |
899 | |
900 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | |
3cfeffc2 SA |
901 | /** |
902 | * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if | |
903 | * NULL) | |
904 | * @data: Pointer to interrupt specific data | |
905 | */ | |
906 | void irq_chip_enable_parent(struct irq_data *data) | |
907 | { | |
908 | data = data->parent_data; | |
909 | if (data->chip->irq_enable) | |
910 | data->chip->irq_enable(data); | |
911 | else | |
912 | data->chip->irq_unmask(data); | |
913 | } | |
914 | ||
915 | /** | |
916 | * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if | |
917 | * NULL) | |
918 | * @data: Pointer to interrupt specific data | |
919 | */ | |
920 | void irq_chip_disable_parent(struct irq_data *data) | |
921 | { | |
922 | data = data->parent_data; | |
923 | if (data->chip->irq_disable) | |
924 | data->chip->irq_disable(data); | |
925 | else | |
926 | data->chip->irq_mask(data); | |
927 | } | |
928 | ||
85f08c17 JL |
929 | /** |
930 | * irq_chip_ack_parent - Acknowledge the parent interrupt | |
931 | * @data: Pointer to interrupt specific data | |
932 | */ | |
933 | void irq_chip_ack_parent(struct irq_data *data) | |
934 | { | |
935 | data = data->parent_data; | |
936 | data->chip->irq_ack(data); | |
937 | } | |
938 | ||
56e8abab YC |
939 | /** |
940 | * irq_chip_mask_parent - Mask the parent interrupt | |
941 | * @data: Pointer to interrupt specific data | |
942 | */ | |
943 | void irq_chip_mask_parent(struct irq_data *data) | |
944 | { | |
945 | data = data->parent_data; | |
946 | data->chip->irq_mask(data); | |
947 | } | |
948 | ||
949 | /** | |
950 | * irq_chip_unmask_parent - Unmask the parent interrupt | |
951 | * @data: Pointer to interrupt specific data | |
952 | */ | |
953 | void irq_chip_unmask_parent(struct irq_data *data) | |
954 | { | |
955 | data = data->parent_data; | |
956 | data->chip->irq_unmask(data); | |
957 | } | |
958 | ||
959 | /** | |
960 | * irq_chip_eoi_parent - Invoke EOI on the parent interrupt | |
961 | * @data: Pointer to interrupt specific data | |
962 | */ | |
963 | void irq_chip_eoi_parent(struct irq_data *data) | |
964 | { | |
965 | data = data->parent_data; | |
966 | data->chip->irq_eoi(data); | |
967 | } | |
968 | ||
969 | /** | |
970 | * irq_chip_set_affinity_parent - Set affinity on the parent interrupt | |
971 | * @data: Pointer to interrupt specific data | |
972 | * @dest: The affinity mask to set | |
973 | * @force: Flag to enforce setting (disable online checks) | |
974 | * | |
975 | * Conditinal, as the underlying parent chip might not implement it. | |
976 | */ | |
977 | int irq_chip_set_affinity_parent(struct irq_data *data, | |
978 | const struct cpumask *dest, bool force) | |
979 | { | |
980 | data = data->parent_data; | |
981 | if (data->chip->irq_set_affinity) | |
982 | return data->chip->irq_set_affinity(data, dest, force); | |
983 | ||
984 | return -ENOSYS; | |
985 | } | |
986 | ||
85f08c17 JL |
987 | /** |
988 | * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware | |
989 | * @data: Pointer to interrupt specific data | |
990 | * | |
991 | * Iterate through the domain hierarchy of the interrupt and check | |
992 | * whether a hw retrigger function exists. If yes, invoke it. | |
993 | */ | |
994 | int irq_chip_retrigger_hierarchy(struct irq_data *data) | |
995 | { | |
996 | for (data = data->parent_data; data; data = data->parent_data) | |
997 | if (data->chip && data->chip->irq_retrigger) | |
998 | return data->chip->irq_retrigger(data); | |
999 | ||
1000 | return -ENOSYS; | |
1001 | } | |
08b55e2a | 1002 | |
0a4377de JL |
1003 | /** |
1004 | * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt | |
1005 | * @data: Pointer to interrupt specific data | |
1006 | * @dest: The vcpu affinity information | |
1007 | */ | |
1008 | int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info) | |
1009 | { | |
1010 | data = data->parent_data; | |
1011 | if (data->chip->irq_set_vcpu_affinity) | |
1012 | return data->chip->irq_set_vcpu_affinity(data, vcpu_info); | |
1013 | ||
1014 | return -ENOSYS; | |
1015 | } | |
1016 | ||
08b55e2a MZ |
1017 | /** |
1018 | * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt | |
1019 | * @data: Pointer to interrupt specific data | |
1020 | * @on: Whether to set or reset the wake-up capability of this irq | |
1021 | * | |
1022 | * Conditional, as the underlying parent chip might not implement it. | |
1023 | */ | |
1024 | int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) | |
1025 | { | |
1026 | data = data->parent_data; | |
1027 | if (data->chip->irq_set_wake) | |
1028 | return data->chip->irq_set_wake(data, on); | |
1029 | ||
1030 | return -ENOSYS; | |
1031 | } | |
85f08c17 | 1032 | #endif |
515085ef JL |
1033 | |
/**
 * irq_chip_compose_msi_msg - Compose msi message for a irq chip
 * @data: Pointer to interrupt specific data
 * @msg: Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non
 * hierarchical we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

	/*
	 * Without hierarchy support the loop header compiles away and the
	 * check below applies to the top-level irq_data only.
	 */
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}