/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int status;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (desc->status & IRQ_INPROGRESS)
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		spin_lock_irqsave(&desc->lock, flags);
		status = desc->status;
		spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (status & IRQ_INPROGRESS);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);

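/*
 * Illustrative sketch (not part of this file): a typical teardown path in a
 * hypothetical driver. The device is told to stop raising interrupts first,
 * then synchronize_irq() waits out any handler still running on another CPU.
 * No lock that the handler itself takes may be held across this call.
 * foo_dev and foo_hw_mask_interrupts() are made-up names.
 *
 *	static void foo_shutdown(struct foo_dev *foo)
 *	{
 *		foo_hw_mask_interrupts(foo);	// device-specific, assumed
 *		synchronize_irq(foo->irq);	// wait for in-flight handlers
 *		// the handler can no longer be running; safe to tear down
 *	}
 */
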
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq:	Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
	    !desc->chip->set_affinity)
		return 0;

	return 1;
}

static void
irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_cpus_allowed_ptr(action->thread, cpumask);
		action = action->next;
	}
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc->chip->set_affinity)
		return -EINVAL;

	spin_lock_irqsave(&desc->lock, flags);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
		cpumask_copy(desc->affinity, cpumask);
		desc->chip->set_affinity(irq, cpumask);
	} else {
		desc->status |= IRQ_MOVE_PENDING;
		cpumask_copy(desc->pending_mask, cpumask);
	}
#else
	cpumask_copy(desc->affinity, cpumask);
	desc->chip->set_affinity(irq, cpumask);
#endif
	irq_set_thread_affinity(desc, cpumask);
	desc->status |= IRQ_AFFINITY_SET;
	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

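/*
 * Illustrative sketch (not part of this file): pinning an interrupt to one
 * CPU from core or architecture code. cpumask_of() is standard; the IRQ
 * number is a made-up example.
 *
 *	// Route IRQ 42 to CPU 0; fails if the chip cannot reroute the line
 *	// (see irq_can_set_affinity() above).
 *	if (irq_set_affinity(42, cpumask_of(0)))
 *		pr_warning("could not move IRQ 42 to CPU 0\n");
 */
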
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
		if (cpumask_any_and(desc->affinity, cpu_online_mask)
		    < nr_cpu_ids)
			goto set_affinity;
		else
			desc->status &= ~IRQ_AFFINITY_SET;
	}

	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
	desc->chip->set_affinity(irq, desc->affinity);

	return 0;
}
#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc);
	if (!ret)
		irq_set_thread_affinity(desc, desc->affinity);
	spin_unlock_irqrestore(&desc->lock, flags);

	return ret;
}

#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_TIMER))
			return;
		desc->status |= IRQ_SUSPENDED;
	}

	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->chip->disable(irq);
	}
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	spin_lock_irqsave(&desc->lock, flags);
	__disable_irq(desc, irq, false);
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume)
		desc->status &= ~IRQ_SUSPENDED;

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;

		if (desc->status & IRQ_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		desc->status = status | IRQ_NOPROBE;
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	spin_lock_irqsave(&desc->lock, flags);
	__enable_irq(desc, irq, false);
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(enable_irq);

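/*
 * Illustrative sketch (not part of this file): because disables nest, two
 * independent code paths in a hypothetical driver can each bracket their
 * critical region; the line is only unmasked again when the last
 * enable_irq() balances the first disable_irq().
 *
 *	disable_irq(foo->irq);		// depth 0 -> 1, line masked
 *	disable_irq(foo->irq);		// depth 1 -> 2
 *	...
 *	enable_irq(foo->irq);		// depth 2 -> 1, still masked
 *	enable_irq(foo->irq);		// depth 1 -> 0, line unmasked
 */
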
static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (desc->chip->set_wake)
		ret = desc->chip->set_wake(irq, on);

	return ret;
}

/**
 * set_irq_wake - control irq power management wakeup
 * @irq:	interrupt to control
 * @on:		enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int set_irq_wake(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret = 0;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	spin_lock_irqsave(&desc->lock, flags);
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				desc->status |= IRQ_WAKEUP;
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				desc->status &= ~IRQ_WAKEUP;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_irq_wake);

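/*
 * Illustrative sketch (not part of this file): a hypothetical driver arming
 * its interrupt as a wakeup source around suspend. Enables and disables
 * must balance, mirroring the wake_depth accounting above. foo_dev is a
 * made-up structure; device_may_wakeup() is the usual PM core helper.
 *
 *	static int foo_suspend(struct foo_dev *foo)
 *	{
 *		if (device_may_wakeup(foo->dev))
 *			set_irq_wake(foo->irq, 1);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct foo_dev *foo)
 *	{
 *		if (device_may_wakeup(foo->dev))
 *			set_irq_wake(foo->irq, 0);
 *		return 0;
 *	}
 */
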
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;

	if (!desc)
		return 0;

	if (desc->status & IRQ_NOREQUEST)
		return 0;

	action = desc->action;
	if (action)
		if (irqflags & action->flags & IRQF_SHARED)
			action = NULL;

	return !action;
}

void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
	/*
	 * If the architecture still has not overridden
	 * the flow handler then zap the default. This
	 * should catch incorrect flow-type setting.
	 */
	if (desc->handle_irq == &handle_bad_irq)
		desc->handle_irq = NULL;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		unsigned long flags)
{
	int ret;
	struct irq_chip *chip = desc->chip;

	if (!chip || !chip->set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
				chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->set_type(irq, flags);

	if (ret)
		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
				(int)flags, irq, chip->set_type);
	else {
		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
			flags |= IRQ_LEVEL;
		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
		desc->status |= flags;
	}

	return ret;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
	return -1;
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	int wake;

	sched_setscheduler(current, SCHED_FIFO, &param);
	current->irqaction = action;

	while (!irq_wait_for_interrupt(action)) {

		atomic_inc(&desc->threads_active);

		spin_lock_irq(&desc->lock);
		if (unlikely(desc->status & IRQ_DISABLED)) {
			/*
			 * CHECKME: We might need a dedicated
			 * IRQ_THREAD_PENDING flag here, which
			 * retriggers the thread in check_irq_resend()
			 * but AFAICT IRQ_PENDING should be fine as it
			 * retriggers the interrupt itself --- tglx
			 */
			desc->status |= IRQ_PENDING;
			spin_unlock_irq(&desc->lock);
		} else {
			spin_unlock_irq(&desc->lock);

			action->thread_fn(action->irq, action->dev_id);
		}

		wake = atomic_dec_and_test(&desc->threads_active);

		if (wake && waitqueue_active(&desc->wait_for_threads))
			wake_up(&desc->wait_for_threads);
	}

	/*
	 * Clear irqaction. Otherwise exit_irq_thread() would make
	 * fuzz about an active irq thread going into nirvana.
	 */
	current->irqaction = NULL;
	return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
	struct task_struct *tsk = current;

	if (!tsk->irqaction)
		return;

	printk(KERN_ERR
	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

	/*
	 * Set the THREAD DIED flag to prevent further wakeups of the
	 * soon to be gone threaded handler.
	 */
	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	const char *old_name = NULL;
	unsigned long flags;
	int shared = 0;
	int ret;

	if (!desc)
		return -EINVAL;

	if (desc->chip == &no_irq_chip)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * Threaded handler ?
	 */
	if (new->thread_fn) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t))
			return PTR_ERR(t);
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
		wake_up_process(t);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
			old_name = old->name;
			goto mismatch;
		}

#if defined(CONFIG_IRQ_PER_CPU)
		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;
#endif

		/* add new interrupt at end of irq queue */
		do {
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	if (!shared) {
		irq_chip_set_defaults(desc->chip);

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_thread;
		} else
			compat_irq_chip_set_default_handler(desc);
#if defined(CONFIG_IRQ_PER_CPU)
		if (new->flags & IRQF_PERCPU)
			desc->status |= IRQ_PER_CPU;
#endif

		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);

		if (!(desc->status & IRQ_NOAUTOEN)) {
			desc->depth = 0;
			desc->status &= ~IRQ_DISABLED;
			desc->chip->startup(irq);
		} else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING)
			desc->status |= IRQ_NO_BALANCING;

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc);

	} else if ((new->flags & IRQF_TRIGGER_MASK)
			&& (new->flags & IRQF_TRIGGER_MASK)
				!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
		/* hope the handler works with the actual trigger mode... */
		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
				irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
				(int)(new->flags & IRQF_TRIGGER_MASK));
	}

	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
		desc->status &= ~IRQ_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	spin_unlock_irqrestore(&desc->lock, flags);

	new->irq = irq;
	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);

	return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
#endif
	ret = -EBUSY;

out_thread:
	spin_unlock_irqrestore(&desc->lock, flags);
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
			kthread_stop(t);
		put_task_struct(t);
	}
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __setup_irq(irq, desc, act);
}
EXPORT_SYMBOL_GPL(setup_irq);

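/*
 * Illustrative sketch (not part of this file): early boot code, for example
 * an architecture's timer setup, registers a statically allocated irqaction
 * with setup_irq() instead of request_irq(), since the allocator may not be
 * usable that early. All names below are made up.
 *
 *	static struct irqaction foo_timer_irqaction = {
 *		.handler = foo_timer_interrupt,
 *		.flags	 = IRQF_DISABLED | IRQF_TIMER,
 *		.name	 = "foo-timer",
 *	};
 *
 *	void __init foo_time_init(void)
 *	{
 *		setup_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
 *	}
 */
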
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	struct task_struct *irqthread;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	if (desc->chip->release)
		desc->chip->release(irq, dev_id);
#endif

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		desc->status |= IRQ_DISABLED;
		if (desc->chip->shutdown)
			desc->chip->shutdown(irq);
		else
			desc->chip->disable(irq);
	}

	irqthread = action->thread;
	action->thread = NULL;

	spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

	if (irqthread) {
		if (!test_bit(IRQTF_DIED, &action->thread_flags))
			kthread_stop(irqthread);
		put_task_struct(irqthread);
	}

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif
	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	kfree(__free_irq(irq, dev_id));
}
EXPORT_SYMBOL(free_irq);

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts
 * @thread_fn: Function called from the irq handler thread
 *	       If NULL, no irq thread is created
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_DISABLED		Disable local interrupts while processing
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
	 * the _first_ irqaction (sigh).  That can cause oopsing, but
	 * the behavior is classified as "will not fix" so we need to
	 * start nudging drivers away from using that idiom.
	 */
	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
					(IRQF_SHARED|IRQF_DISABLED)) {
		pr_warning(
		  "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
			irq, devname);
	}

#ifdef CONFIG_LOCKDEP
	/*
	 * Lockdep wants atomic interrupt handlers:
	 */
	irqflags |= IRQF_DISABLED;
#endif
	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (desc->status & IRQ_NOREQUEST)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ
	if (irqflags & IRQF_SHARED) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
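
/*
 * Illustrative sketch (not part of this file): the split primary/threaded
 * handler pattern described in the kernel-doc above, for a hypothetical
 * "foo" device. The primary handler runs in hard interrupt context, checks
 * that the device really raised the interrupt, silences it at the device
 * and returns IRQ_WAKE_THREAD; the threaded handler then does the work that
 * may sleep. foo_dev and the foo_hw_*()/foo_process_events() helpers are
 * made-up names.
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_hw_irq_pending(foo))	// device-specific, assumed
 *			return IRQ_NONE;	// shared line: not ours
 *		foo_hw_mask_irq(foo);		// quiet until the thread runs
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep here
 *		foo_hw_unmask_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	// in the hypothetical device's probe path:
 *	ret = request_threaded_irq(foo->irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 *	if (ret)
 *		goto err_free_foo;
 */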