kernel/irq/manage.c
1 /*
2 * linux/kernel/irq/manage.c
3 *
4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5 * Copyright (C) 2005-2006 Thomas Gleixner
6 *
7 * This file contains driver APIs to the irq subsystem.
8 */
9
10 #define pr_fmt(fmt) "genirq: " fmt
11
12 #include <linux/irq.h>
13 #include <linux/kthread.h>
14 #include <linux/module.h>
15 #include <linux/random.h>
16 #include <linux/interrupt.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/sched/rt.h>
20 #include <linux/task_work.h>
21
22 #include "internals.h"
23
24 #ifdef CONFIG_IRQ_FORCED_THREADING
25 __read_mostly bool force_irqthreads;
26
27 static int __init setup_forced_irqthreads(char *arg)
28 {
29 force_irqthreads = true;
30 return 0;
31 }
32 early_param("threadirqs", setup_forced_irqthreads);
33 #endif
34
35 static void __synchronize_hardirq(struct irq_desc *desc)
36 {
37 bool inprogress;
38
39 do {
40 unsigned long flags;
41
42 /*
43 * Wait until we're out of the critical section. This might
44 * give the wrong answer due to the lack of memory barriers.
45 */
46 while (irqd_irq_inprogress(&desc->irq_data))
47 cpu_relax();
48
49 /* Ok, that indicated we're done: double-check carefully. */
50 raw_spin_lock_irqsave(&desc->lock, flags);
51 inprogress = irqd_irq_inprogress(&desc->irq_data);
52 raw_spin_unlock_irqrestore(&desc->lock, flags);
53
54 /* Oops, that failed? */
55 } while (inprogress);
56 }
57
58 /**
59 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
60 * @irq: interrupt number to wait for
61 *
62 * This function waits for any pending hard IRQ handlers for this
63 * interrupt to complete before returning. If you use this
64 * function while holding a resource the IRQ handler may need,
65 * will deadlock. It does not take associated threaded handlers
66 * into account.
67 *
68 * Do not use this for shutdown scenarios where you must be sure
69 * that all parts (hardirq and threaded handler) have completed.
70 *
71 * Returns: false if a threaded handler is active.
72 *
73 * This function may be called - with care - from IRQ context.
74 */
75 bool synchronize_hardirq(unsigned int irq)
76 {
77 struct irq_desc *desc = irq_to_desc(irq);
78
79 if (desc) {
80 __synchronize_hardirq(desc);
81 return !atomic_read(&desc->threads_active);
82 }
83
84 return true;
85 }
86 EXPORT_SYMBOL(synchronize_hardirq);
87
88 /**
89 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
90 * @irq: interrupt number to wait for
91 *
92 * This function waits for any pending IRQ handlers for this interrupt
93 * to complete before returning. If you use this function while
94 * holding a resource the IRQ handler may need, you will deadlock.
95 *
96 * This function may be called - with care - from IRQ context.
97 */
98 void synchronize_irq(unsigned int irq)
99 {
100 struct irq_desc *desc = irq_to_desc(irq);
101
102 if (desc) {
103 __synchronize_hardirq(desc);
104 /*
105 * We made sure that no hardirq handler is
106 * running. Now verify that no threaded handlers are
107 * active.
108 */
109 wait_event(desc->wait_for_threads,
110 !atomic_read(&desc->threads_active));
111 }
112 }
113 EXPORT_SYMBOL(synchronize_irq);
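/*
 * Usage sketch (illustrative, not part of this file): a driver quiescing
 * its device before tearing down data its handlers dereference. my_dev
 * and the MY_* register offsets are hypothetical.
 *
 *	static void my_dev_stop(struct my_dev *dev)
 *	{
 *		writel(0, dev->regs + MY_IRQ_ENABLE);	// mask at the device
 *		synchronize_irq(dev->irq);	// wait out running handlers
 *		// No handler instance (hardirq or threaded) runs past this
 *		// point, so shared state may be torn down safely.
 *	}
 */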
114
115 #ifdef CONFIG_SMP
116 cpumask_var_t irq_default_affinity;
117
118 static bool __irq_can_set_affinity(struct irq_desc *desc)
119 {
120 if (!desc || !irqd_can_balance(&desc->irq_data) ||
121 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
122 return false;
123 return true;
124 }
125
126 /**
127 * irq_can_set_affinity - Check if the affinity of a given irq can be set
128 * @irq: Interrupt to check
129 *
130 */
131 int irq_can_set_affinity(unsigned int irq)
132 {
133 return __irq_can_set_affinity(irq_to_desc(irq));
134 }
135
136 /**
137 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
138 * @irq: Interrupt to check
139 *
140 * Like irq_can_set_affinity() above, but additionally checks for the
141 * AFFINITY_MANAGED flag.
142 */
143 bool irq_can_set_affinity_usr(unsigned int irq)
144 {
145 struct irq_desc *desc = irq_to_desc(irq);
146
147 return __irq_can_set_affinity(desc) &&
148 !irqd_affinity_is_managed(&desc->irq_data);
149 }
150
151 /**
152 * irq_set_thread_affinity - Notify irq threads to adjust affinity
153 * @desc: irq descriptor which has affinity changed
154 *
155 * We just set IRQTF_AFFINITY and delegate the affinity setting
156 * to the interrupt thread itself. We cannot call
157 * set_cpus_allowed_ptr() here as we hold desc->lock and this
158 * code can be called from hard interrupt context.
159 */
160 void irq_set_thread_affinity(struct irq_desc *desc)
161 {
162 struct irqaction *action;
163
164 for_each_action_of_desc(desc, action)
165 if (action->thread)
166 set_bit(IRQTF_AFFINITY, &action->thread_flags);
167 }
168
169 #ifdef CONFIG_GENERIC_PENDING_IRQ
170 static inline bool irq_can_move_pcntxt(struct irq_data *data)
171 {
172 return irqd_can_move_in_process_context(data);
173 }
174 static inline bool irq_move_pending(struct irq_data *data)
175 {
176 return irqd_is_setaffinity_pending(data);
177 }
178 static inline void
179 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
180 {
181 cpumask_copy(desc->pending_mask, mask);
182 }
183 static inline void
184 irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
185 {
186 cpumask_copy(mask, desc->pending_mask);
187 }
188 #else
189 static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
190 static inline bool irq_move_pending(struct irq_data *data) { return false; }
191 static inline void
192 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
193 static inline void
194 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
195 #endif
196
197 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
198 bool force)
199 {
200 struct irq_desc *desc = irq_data_to_desc(data);
201 struct irq_chip *chip = irq_data_get_irq_chip(data);
202 int ret;
203
204 ret = chip->irq_set_affinity(data, mask, force);
205 switch (ret) {
206 case IRQ_SET_MASK_OK:
207 case IRQ_SET_MASK_OK_DONE:
208 cpumask_copy(desc->irq_common_data.affinity, mask);
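/* fall-through */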
209 case IRQ_SET_MASK_OK_NOCOPY:
210 irq_set_thread_affinity(desc);
211 ret = 0;
212 }
213
214 return ret;
215 }
216
217 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
218 bool force)
219 {
220 struct irq_chip *chip = irq_data_get_irq_chip(data);
221 struct irq_desc *desc = irq_data_to_desc(data);
222 int ret = 0;
223
224 if (!chip || !chip->irq_set_affinity)
225 return -EINVAL;
226
227 if (irq_can_move_pcntxt(data)) {
228 ret = irq_do_set_affinity(data, mask, force);
229 } else {
230 irqd_set_move_pending(data);
231 irq_copy_pending(desc, mask);
232 }
233
234 if (desc->affinity_notify) {
235 kref_get(&desc->affinity_notify->kref);
236 schedule_work(&desc->affinity_notify->work);
237 }
238 irqd_set(data, IRQD_AFFINITY_SET);
239
240 return ret;
241 }
242
243 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
244 {
245 struct irq_desc *desc = irq_to_desc(irq);
246 unsigned long flags;
247 int ret;
248
249 if (!desc)
250 return -EINVAL;
251
252 raw_spin_lock_irqsave(&desc->lock, flags);
253 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
254 raw_spin_unlock_irqrestore(&desc->lock, flags);
255 return ret;
256 }
257
258 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
259 {
260 unsigned long flags;
261 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
262
263 if (!desc)
264 return -EINVAL;
265 desc->affinity_hint = m;
266 irq_put_desc_unlock(desc, flags);
267 /* set the initial affinity to prevent every interrupt from being on CPU0 */
268 if (m)
269 __irq_set_affinity(irq, m, false);
270 return 0;
271 }
272 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
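/*
 * Usage sketch (illustrative): a multiqueue driver publishing one CPU per
 * queue so userspace balancers can follow it. nr_queues and queue_irq[]
 * are hypothetical.
 *
 *	for (i = 0; i < nr_queues; i++)
 *		irq_set_affinity_hint(queue_irq[i],
 *				      cpumask_of(i % num_online_cpus()));
 *
 * Pass a NULL mask to clear the hint again before the irq is freed.
 */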
273
274 static void irq_affinity_notify(struct work_struct *work)
275 {
276 struct irq_affinity_notify *notify =
277 container_of(work, struct irq_affinity_notify, work);
278 struct irq_desc *desc = irq_to_desc(notify->irq);
279 cpumask_var_t cpumask;
280 unsigned long flags;
281
282 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
283 goto out;
284
285 raw_spin_lock_irqsave(&desc->lock, flags);
286 if (irq_move_pending(&desc->irq_data))
287 irq_get_pending(cpumask, desc);
288 else
289 cpumask_copy(cpumask, desc->irq_common_data.affinity);
290 raw_spin_unlock_irqrestore(&desc->lock, flags);
291
292 notify->notify(notify, cpumask);
293
294 free_cpumask_var(cpumask);
295 out:
296 kref_put(&notify->kref, notify->release);
297 }
298
299 /**
300 * irq_set_affinity_notifier - control notification of IRQ affinity changes
301 * @irq: Interrupt for which to enable/disable notification
302 * @notify: Context for notification, or %NULL to disable
303 * notification. Function pointers must be initialised;
304 * the other fields will be initialised by this function.
305 *
306 * Must be called in process context. Notification may only be enabled
307 * after the IRQ is allocated and must be disabled before the IRQ is
308 * freed using free_irq().
309 */
310 int
311 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
312 {
313 struct irq_desc *desc = irq_to_desc(irq);
314 struct irq_affinity_notify *old_notify;
315 unsigned long flags;
316
317 /* The release function is promised process context */
318 might_sleep();
319
320 if (!desc)
321 return -EINVAL;
322
323 /* Complete initialisation of *notify */
324 if (notify) {
325 notify->irq = irq;
326 kref_init(&notify->kref);
327 INIT_WORK(&notify->work, irq_affinity_notify);
328 }
329
330 raw_spin_lock_irqsave(&desc->lock, flags);
331 old_notify = desc->affinity_notify;
332 desc->affinity_notify = notify;
333 raw_spin_unlock_irqrestore(&desc->lock, flags);
334
335 if (old_notify)
336 kref_put(&old_notify->kref, old_notify->release);
337
338 return 0;
339 }
340 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
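/*
 * Usage sketch (illustrative): only the notify and release callbacks must
 * be filled in by the caller; the remaining fields are initialised above.
 * my_dev and both callbacks are hypothetical.
 *
 *	static void my_notify(struct irq_affinity_notify *notify,
 *			      const cpumask_t *mask)
 *	{
 *		// re-steer per-CPU buffers/flows to the new mask
 *	}
 *
 *	static void my_release(struct kref *ref)
 *	{
 *		// drop the reference taken while the work was queued
 *	}
 *
 *	dev->notify.notify = my_notify;
 *	dev->notify.release = my_release;
 *	irq_set_affinity_notifier(dev->irq, &dev->notify);
 *
 * Disable with irq_set_affinity_notifier(dev->irq, NULL) before free_irq().
 */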
341
342 #ifndef CONFIG_AUTO_IRQ_AFFINITY
343 /*
344 * Generic version of the affinity autoselector.
345 */
346 static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
347 {
348 struct cpumask *set = irq_default_affinity;
349 int node = irq_desc_get_node(desc);
350
351 /* Excludes PER_CPU and NO_BALANCE interrupts */
352 if (!__irq_can_set_affinity(desc))
353 return 0;
354
355 /*
356 * Preserve the managed affinity setting and a userspace affinity
357 * setup, but make sure that one of the targets is online.
358 */
359 if (irqd_affinity_is_managed(&desc->irq_data) ||
360 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
361 if (cpumask_intersects(desc->irq_common_data.affinity,
362 cpu_online_mask))
363 set = desc->irq_common_data.affinity;
364 else
365 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
366 }
367
368 cpumask_and(mask, cpu_online_mask, set);
369 if (node != NUMA_NO_NODE) {
370 const struct cpumask *nodemask = cpumask_of_node(node);
371
372 /* make sure at least one of the cpus in nodemask is online */
373 if (cpumask_intersects(mask, nodemask))
374 cpumask_and(mask, mask, nodemask);
375 }
376 irq_do_set_affinity(&desc->irq_data, mask, false);
377 return 0;
378 }
379 #else
380 /* Wrapper for ALPHA specific affinity selector magic */
381 static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
382 {
383 return irq_select_affinity(irq_desc_get_irq(d));
384 }
385 #endif
386
387 /*
388 * Called when affinity is set via /proc/irq
389 */
390 int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
391 {
392 struct irq_desc *desc = irq_to_desc(irq);
393 unsigned long flags;
394 int ret;
395
396 raw_spin_lock_irqsave(&desc->lock, flags);
397 ret = setup_affinity(desc, mask);
398 raw_spin_unlock_irqrestore(&desc->lock, flags);
399 return ret;
400 }
401
402 #else
403 static inline int
404 setup_affinity(struct irq_desc *desc, struct cpumask *mask)
405 {
406 return 0;
407 }
408 #endif
409
410 /**
411 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
412 * @irq: interrupt number to set affinity
413 * @vcpu_info: vCPU specific data
414 *
415 * This function uses the vCPU specific data to set the vCPU
416 * affinity for an irq. The vCPU specific data is passed from
417 * outside, such as KVM. One example code path is as below:
418 * KVM -> IOMMU -> irq_set_vcpu_affinity().
419 */
420 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
421 {
422 unsigned long flags;
423 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
424 struct irq_data *data;
425 struct irq_chip *chip;
426 int ret = -ENOSYS;
427
428 if (!desc)
429 return -EINVAL;
430
431 data = irq_desc_get_irq_data(desc);
432 chip = irq_data_get_irq_chip(data);
433 if (chip && chip->irq_set_vcpu_affinity)
434 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
435 irq_put_desc_unlock(desc, flags);
436
437 return ret;
438 }
439 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
440
441 void __disable_irq(struct irq_desc *desc)
442 {
443 if (!desc->depth++)
444 irq_disable(desc);
445 }
446
447 static int __disable_irq_nosync(unsigned int irq)
448 {
449 unsigned long flags;
450 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
451
452 if (!desc)
453 return -EINVAL;
454 __disable_irq(desc);
455 irq_put_desc_busunlock(desc, flags);
456 return 0;
457 }
458
459 /**
460 * disable_irq_nosync - disable an irq without waiting
461 * @irq: Interrupt to disable
462 *
463 * Disable the selected interrupt line. Disables and Enables are
464 * nested.
465 * Unlike disable_irq(), this function does not ensure existing
466 * instances of the IRQ handler have completed before returning.
467 *
468 * This function may be called from IRQ context.
469 */
470 void disable_irq_nosync(unsigned int irq)
471 {
472 __disable_irq_nosync(irq);
473 }
474 EXPORT_SYMBOL(disable_irq_nosync);
475
476 /**
477 * disable_irq - disable an irq and wait for completion
478 * @irq: Interrupt to disable
479 *
480 * Disable the selected interrupt line. Enables and Disables are
481 * nested.
482 * This function waits for any pending IRQ handlers for this interrupt
483 * to complete before returning. If you use this function while
484 * holding a resource the IRQ handler may need, you will deadlock.
485 *
486 * This function may be called - with care - from IRQ context.
487 */
488 void disable_irq(unsigned int irq)
489 {
490 if (!__disable_irq_nosync(irq))
491 synchronize_irq(irq);
492 }
493 EXPORT_SYMBOL(disable_irq);
494
495 /**
496 * disable_hardirq - disables an irq and waits for hardirq completion
497 * @irq: Interrupt to disable
498 *
499 * Disable the selected interrupt line. Enables and Disables are
500 * nested.
501 * This function waits for any pending hard IRQ handlers for this
502 * interrupt to complete before returning. If you use this function while
503 * holding a resource the hard IRQ handler may need, you will deadlock.
504 *
505 * When used to optimistically disable an interrupt from atomic context
506 * the return value must be checked.
507 *
508 * Returns: false if a threaded handler is active.
509 *
510 * This function may be called - with care - from IRQ context.
511 */
512 bool disable_hardirq(unsigned int irq)
513 {
514 if (!__disable_irq_nosync(irq))
515 return synchronize_hardirq(irq);
516
517 return false;
518 }
519 EXPORT_SYMBOL_GPL(disable_hardirq);
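/*
 * Usage sketch (illustrative): optimistic disable from atomic context,
 * e.g. a netpoll-style caller. Per the comment above, the return value
 * must be checked; dev and my_poll() are hypothetical.
 *
 *	if (disable_hardirq(dev->irq))
 *		my_poll(dev);	// safe: no hardirq handler is running
 *	enable_irq(dev->irq);
 */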
520
521 void __enable_irq(struct irq_desc *desc)
522 {
523 switch (desc->depth) {
524 case 0:
525 err_out:
526 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
527 irq_desc_get_irq(desc));
528 break;
529 case 1: {
530 if (desc->istate & IRQS_SUSPENDED)
531 goto err_out;
532 /* Prevent probing on this irq: */
533 irq_settings_set_noprobe(desc);
534 irq_enable(desc);
535 check_irq_resend(desc);
536 /* fall-through */
537 }
538 default:
539 desc->depth--;
540 }
541 }
542
543 /**
544 * enable_irq - enable handling of an irq
545 * @irq: Interrupt to enable
546 *
547 * Undoes the effect of one call to disable_irq(). If this
548 * matches the last disable, processing of interrupts on this
549 * IRQ line is re-enabled.
550 *
551 * This function may be called from IRQ context only when
552 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
553 */
554 void enable_irq(unsigned int irq)
555 {
556 unsigned long flags;
557 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
558
559 if (!desc)
560 return;
561 if (WARN(!desc->irq_data.chip,
562 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
563 goto out;
564
565 __enable_irq(desc);
566 out:
567 irq_put_desc_busunlock(desc, flags);
568 }
569 EXPORT_SYMBOL(enable_irq);
570
571 static int set_irq_wake_real(unsigned int irq, unsigned int on)
572 {
573 struct irq_desc *desc = irq_to_desc(irq);
574 int ret = -ENXIO;
575
576 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
577 return 0;
578
579 if (desc->irq_data.chip->irq_set_wake)
580 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
581
582 return ret;
583 }
584
585 /**
586 * irq_set_irq_wake - control irq power management wakeup
587 * @irq: interrupt to control
588 * @on: enable/disable power management wakeup
589 *
590 * Enable/disable power management wakeup mode, which is
591 * disabled by default. Enables and disables must match,
592 * just as they match for non-wakeup mode support.
593 *
594 * Wakeup mode lets this IRQ wake the system from sleep
595 * states like "suspend to RAM".
596 */
597 int irq_set_irq_wake(unsigned int irq, unsigned int on)
598 {
599 unsigned long flags;
600 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
601 int ret = 0;
602
603 if (!desc)
604 return -EINVAL;
605
606 /* wakeup-capable irqs can be shared between drivers that
607 * don't need to have the same sleep mode behaviors.
608 */
609 if (on) {
610 if (desc->wake_depth++ == 0) {
611 ret = set_irq_wake_real(irq, on);
612 if (ret)
613 desc->wake_depth = 0;
614 else
615 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
616 }
617 } else {
618 if (desc->wake_depth == 0) {
619 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
620 } else if (--desc->wake_depth == 0) {
621 ret = set_irq_wake_real(irq, on);
622 if (ret)
623 desc->wake_depth = 1;
624 else
625 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
626 }
627 }
628 irq_put_desc_busunlock(desc, flags);
629 return ret;
630 }
631 EXPORT_SYMBOL(irq_set_irq_wake);
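/*
 * Usage sketch (illustrative): arming a wakeup-capable line from a
 * driver's suspend path. The enable is paired with a matching disable in
 * resume; my_suspend() and my_dev are hypothetical.
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		struct my_dev *dev = dev_get_drvdata(d);
 *
 *		if (device_may_wakeup(d))
 *			irq_set_irq_wake(dev->irq, 1);
 *		return 0;
 *	}
 */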
632
633 /*
634 * Internal function that tells the architecture code whether a
635 * particular irq has been exclusively allocated or is available
636 * for driver use.
637 */
638 int can_request_irq(unsigned int irq, unsigned long irqflags)
639 {
640 unsigned long flags;
641 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
642 int canrequest = 0;
643
644 if (!desc)
645 return 0;
646
647 if (irq_settings_can_request(desc)) {
648 if (!desc->action ||
649 irqflags & desc->action->flags & IRQF_SHARED)
650 canrequest = 1;
651 }
652 irq_put_desc_unlock(desc, flags);
653 return canrequest;
654 }
655
656 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
657 {
658 struct irq_chip *chip = desc->irq_data.chip;
659 int ret, unmask = 0;
660
661 if (!chip || !chip->irq_set_type) {
662 /*
663 * IRQF_TRIGGER_* but the PIC does not support multiple
664 * flow-types?
665 */
666 pr_debug("No set_type function for IRQ %d (%s)\n",
667 irq_desc_get_irq(desc),
668 chip ? (chip->name ? : "unknown") : "unknown");
669 return 0;
670 }
671
672 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
673 if (!irqd_irq_masked(&desc->irq_data))
674 mask_irq(desc);
675 if (!irqd_irq_disabled(&desc->irq_data))
676 unmask = 1;
677 }
678
679 /* Mask all flags except trigger mode */
680 flags &= IRQ_TYPE_SENSE_MASK;
681 ret = chip->irq_set_type(&desc->irq_data, flags);
682
683 switch (ret) {
684 case IRQ_SET_MASK_OK:
685 case IRQ_SET_MASK_OK_DONE:
686 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
687 irqd_set(&desc->irq_data, flags);
688
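/* fall-through */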
689 case IRQ_SET_MASK_OK_NOCOPY:
690 flags = irqd_get_trigger_type(&desc->irq_data);
691 irq_settings_set_trigger_mask(desc, flags);
692 irqd_clear(&desc->irq_data, IRQD_LEVEL);
693 irq_settings_clr_level(desc);
694 if (flags & IRQ_TYPE_LEVEL_MASK) {
695 irq_settings_set_level(desc);
696 irqd_set(&desc->irq_data, IRQD_LEVEL);
697 }
698
699 ret = 0;
700 break;
701 default:
702 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
703 flags, irq_desc_get_irq(desc), chip->irq_set_type);
704 }
705 if (unmask)
706 unmask_irq(desc);
707 return ret;
708 }
709
710 #ifdef CONFIG_HARDIRQS_SW_RESEND
711 int irq_set_parent(int irq, int parent_irq)
712 {
713 unsigned long flags;
714 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
715
716 if (!desc)
717 return -EINVAL;
718
719 desc->parent_irq = parent_irq;
720
721 irq_put_desc_unlock(desc, flags);
722 return 0;
723 }
724 #endif
725
726 /*
727 * Default primary interrupt handler for threaded interrupts. Is
728 * assigned as primary handler when request_threaded_irq is called
729 * with handler == NULL. Useful for oneshot interrupts.
730 */
731 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
732 {
733 return IRQ_WAKE_THREAD;
734 }
735
736 /*
737 * Primary handler for nested threaded interrupts. Should never be
738 * called.
739 */
740 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
741 {
742 WARN(1, "Primary handler called for nested irq %d\n", irq);
743 return IRQ_NONE;
744 }
745
746 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
747 {
748 WARN(1, "Secondary action handler called for irq %d\n", irq);
749 return IRQ_NONE;
750 }
751
752 static int irq_wait_for_interrupt(struct irqaction *action)
753 {
754 set_current_state(TASK_INTERRUPTIBLE);
755
756 while (!kthread_should_stop()) {
757
758 if (test_and_clear_bit(IRQTF_RUNTHREAD,
759 &action->thread_flags)) {
760 __set_current_state(TASK_RUNNING);
761 return 0;
762 }
763 schedule();
764 set_current_state(TASK_INTERRUPTIBLE);
765 }
766 __set_current_state(TASK_RUNNING);
767 return -1;
768 }
769
770 /*
771 * Oneshot interrupts keep the irq line masked until the threaded
772 * handler has finished. Unmask if the interrupt has not been disabled
773 * and is marked MASKED.
774 */
775 static void irq_finalize_oneshot(struct irq_desc *desc,
776 struct irqaction *action)
777 {
778 if (!(desc->istate & IRQS_ONESHOT) ||
779 action->handler == irq_forced_secondary_handler)
780 return;
781 again:
782 chip_bus_lock(desc);
783 raw_spin_lock_irq(&desc->lock);
784
785 /*
786 * Implausible though it may be, we need to protect ourselves
787 * against the following scenario:
788 *
789 * The thread finishes before the hard interrupt handler on the
790 * other CPU. If we unmask the irq line then the interrupt can
791 * come in again, mask the line and leave due to IRQS_INPROGRESS,
792 * so the irq line stays masked forever.
793 *
794 * This also serializes the state of shared oneshot handlers
795 * versus "desc->threads_oneshot |= action->thread_mask;" in
796 * irq_wake_thread(). See the comment there which explains the
797 * serialization.
798 */
799 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
800 raw_spin_unlock_irq(&desc->lock);
801 chip_bus_sync_unlock(desc);
802 cpu_relax();
803 goto again;
804 }
805
806 /*
807 * Now check again whether the thread should run. Otherwise
808 * we would clear the threads_oneshot bit of this thread which
809 * was just set.
810 */
811 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
812 goto out_unlock;
813
814 desc->threads_oneshot &= ~action->thread_mask;
815
816 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
817 irqd_irq_masked(&desc->irq_data))
818 unmask_threaded_irq(desc);
819
820 out_unlock:
821 raw_spin_unlock_irq(&desc->lock);
822 chip_bus_sync_unlock(desc);
823 }
824
825 #ifdef CONFIG_SMP
826 /*
827 * Check whether we need to change the affinity of the interrupt thread.
828 */
829 static void
830 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
831 {
832 cpumask_var_t mask;
833 bool valid = true;
834
835 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
836 return;
837
838 /*
839 * In case we are out of memory we set IRQTF_AFFINITY again and
840 * try again next time
841 */
842 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
843 set_bit(IRQTF_AFFINITY, &action->thread_flags);
844 return;
845 }
846
847 raw_spin_lock_irq(&desc->lock);
848 /*
849 * This code is triggered unconditionally. Check the affinity
850 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
851 */
852 if (desc->irq_common_data.affinity)
853 cpumask_copy(mask, desc->irq_common_data.affinity);
854 else
855 valid = false;
856 raw_spin_unlock_irq(&desc->lock);
857
858 if (valid)
859 set_cpus_allowed_ptr(current, mask);
860 free_cpumask_var(mask);
861 }
862 #else
863 static inline void
864 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
865 #endif
866
867 /*
868 * Interrupts which are not explicitly requested as threaded
869 * interrupts rely on the implicit bh/preempt disable of the hard irq
870 * context. So we need to disable bh here to avoid deadlocks and other
871 * side effects.
872 */
873 static irqreturn_t
874 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
875 {
876 irqreturn_t ret;
877
878 local_bh_disable();
879 ret = action->thread_fn(action->irq, action->dev_id);
880 irq_finalize_oneshot(desc, action);
881 local_bh_enable();
882 return ret;
883 }
884
885 /*
886 * Interrupts explicitly requested as threaded interrupts want to be
887 * preemptible - many of them need to sleep and wait for slow buses to
888 * complete.
889 */
890 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
891 struct irqaction *action)
892 {
893 irqreturn_t ret;
894
895 ret = action->thread_fn(action->irq, action->dev_id);
896 irq_finalize_oneshot(desc, action);
897 return ret;
898 }
899
900 static void wake_threads_waitq(struct irq_desc *desc)
901 {
902 if (atomic_dec_and_test(&desc->threads_active))
903 wake_up(&desc->wait_for_threads);
904 }
905
906 static void irq_thread_dtor(struct callback_head *unused)
907 {
908 struct task_struct *tsk = current;
909 struct irq_desc *desc;
910 struct irqaction *action;
911
912 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
913 return;
914
915 action = kthread_data(tsk);
916
917 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
918 tsk->comm, tsk->pid, action->irq);
919
920
921 desc = irq_to_desc(action->irq);
922 /*
923 * If IRQTF_RUNTHREAD is set, we need to decrement
924 * desc->threads_active and wake possible waiters.
925 */
926 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
927 wake_threads_waitq(desc);
928
929 /* Prevent a stale desc->threads_oneshot */
930 irq_finalize_oneshot(desc, action);
931 }
932
933 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
934 {
935 struct irqaction *secondary = action->secondary;
936
937 if (WARN_ON_ONCE(!secondary))
938 return;
939
940 raw_spin_lock_irq(&desc->lock);
941 __irq_wake_thread(desc, secondary);
942 raw_spin_unlock_irq(&desc->lock);
943 }
944
945 /*
946 * Interrupt handler thread
947 */
948 static int irq_thread(void *data)
949 {
950 struct callback_head on_exit_work;
951 struct irqaction *action = data;
952 struct irq_desc *desc = irq_to_desc(action->irq);
953 irqreturn_t (*handler_fn)(struct irq_desc *desc,
954 struct irqaction *action);
955
956 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
957 &action->thread_flags))
958 handler_fn = irq_forced_thread_fn;
959 else
960 handler_fn = irq_thread_fn;
961
962 init_task_work(&on_exit_work, irq_thread_dtor);
963 task_work_add(current, &on_exit_work, false);
964
965 irq_thread_check_affinity(desc, action);
966
967 while (!irq_wait_for_interrupt(action)) {
968 irqreturn_t action_ret;
969
970 irq_thread_check_affinity(desc, action);
971
972 action_ret = handler_fn(desc, action);
973 if (action_ret == IRQ_HANDLED)
974 atomic_inc(&desc->threads_handled);
975 if (action_ret == IRQ_WAKE_THREAD)
976 irq_wake_secondary(desc, action);
977
978 wake_threads_waitq(desc);
979 }
980
981 /*
982 * This is the regular exit path. __free_irq() is stopping the
983 * thread via kthread_stop() after calling
984 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
985 * oneshot mask bit can be set. We cannot verify that as we
986 * cannot touch the oneshot mask at this point anymore as
987 * __setup_irq() might have given out current's thread_mask
988 * again.
989 */
990 task_work_cancel(current, irq_thread_dtor);
991 return 0;
992 }
993
994 /**
995 * irq_wake_thread - wake the irq thread for the action identified by dev_id
996 * @irq: Interrupt line
997 * @dev_id: Device identity for which the thread should be woken
998 *
999 */
1000 void irq_wake_thread(unsigned int irq, void *dev_id)
1001 {
1002 struct irq_desc *desc = irq_to_desc(irq);
1003 struct irqaction *action;
1004 unsigned long flags;
1005
1006 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1007 return;
1008
1009 raw_spin_lock_irqsave(&desc->lock, flags);
1010 for_each_action_of_desc(desc, action) {
1011 if (action->dev_id == dev_id) {
1012 if (action->thread)
1013 __irq_wake_thread(desc, action);
1014 break;
1015 }
1016 }
1017 raw_spin_unlock_irqrestore(&desc->lock, flags);
1018 }
1019 EXPORT_SYMBOL_GPL(irq_wake_thread);
1020
1021 static int irq_setup_forced_threading(struct irqaction *new)
1022 {
1023 if (!force_irqthreads)
1024 return 0;
1025 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1026 return 0;
1027
1028 new->flags |= IRQF_ONESHOT;
1029
1030 /*
1031 * Handle the case where we have a real primary handler and a
1032 * thread handler. We force-thread them as well by creating a
1033 * secondary action.
1034 */
1035 if (new->handler != irq_default_primary_handler && new->thread_fn) {
1036 /* Allocate the secondary action */
1037 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1038 if (!new->secondary)
1039 return -ENOMEM;
1040 new->secondary->handler = irq_forced_secondary_handler;
1041 new->secondary->thread_fn = new->thread_fn;
1042 new->secondary->dev_id = new->dev_id;
1043 new->secondary->irq = new->irq;
1044 new->secondary->name = new->name;
1045 }
1046 /* Deal with the primary handler */
1047 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1048 new->thread_fn = new->handler;
1049 new->handler = irq_default_primary_handler;
1050 return 0;
1051 }
1052
1053 static int irq_request_resources(struct irq_desc *desc)
1054 {
1055 struct irq_data *d = &desc->irq_data;
1056 struct irq_chip *c = d->chip;
1057
1058 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1059 }
1060
1061 static void irq_release_resources(struct irq_desc *desc)
1062 {
1063 struct irq_data *d = &desc->irq_data;
1064 struct irq_chip *c = d->chip;
1065
1066 if (c->irq_release_resources)
1067 c->irq_release_resources(d);
1068 }
1069
1070 static int
1071 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1072 {
1073 struct task_struct *t;
1074 struct sched_param param = {
1075 .sched_priority = MAX_USER_RT_PRIO/2,
1076 };
1077
1078 if (!secondary) {
1079 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1080 new->name);
1081 } else {
1082 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1083 new->name);
1084 param.sched_priority -= 1;
1085 }
1086
1087 if (IS_ERR(t))
1088 return PTR_ERR(t);
1089
1090 sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1091
1092 /*
1093 * We keep the reference to the task struct even if
1094 * the thread dies, to prevent the interrupt code from
1095 * referencing an already freed task_struct.
1096 */
1097 get_task_struct(t);
1098 new->thread = t;
1099 /*
1100 * Tell the thread to set its affinity. This is
1101 * important for shared interrupt handlers as we do
1102 * not invoke setup_affinity() for the secondary
1103 * handlers as everything is already set up. Even for
1104 * interrupts marked with IRQF_NOBALANCING this is
1105 * correct as we want the thread to move to the cpu(s)
1106 * on which the requesting code placed the interrupt.
1107 */
1108 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1109 return 0;
1110 }
1111
1112 /*
1113 * Internal function to register an irqaction - typically used to
1114 * allocate special interrupts that are part of the architecture.
1115 */
1116 static int
1117 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1118 {
1119 struct irqaction *old, **old_ptr;
1120 unsigned long flags, thread_mask = 0;
1121 int ret, nested, shared = 0;
1122 cpumask_var_t mask;
1123
1124 if (!desc)
1125 return -EINVAL;
1126
1127 if (desc->irq_data.chip == &no_irq_chip)
1128 return -ENOSYS;
1129 if (!try_module_get(desc->owner))
1130 return -ENODEV;
1131
1132 new->irq = irq;
1133
1134 /*
1135 * If the trigger type is not specified by the caller,
1136 * then use the default for this interrupt.
1137 */
1138 if (!(new->flags & IRQF_TRIGGER_MASK))
1139 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1140
1141 /*
1142 * Check whether the interrupt nests into another interrupt
1143 * thread.
1144 */
1145 nested = irq_settings_is_nested_thread(desc);
1146 if (nested) {
1147 if (!new->thread_fn) {
1148 ret = -EINVAL;
1149 goto out_mput;
1150 }
1151 /*
1152 * Replace the primary handler which was provided from
1153 * the driver for non nested interrupt handling by the
1154 * dummy function which warns when called.
1155 */
1156 new->handler = irq_nested_primary_handler;
1157 } else {
1158 if (irq_settings_can_thread(desc)) {
1159 ret = irq_setup_forced_threading(new);
1160 if (ret)
1161 goto out_mput;
1162 }
1163 }
1164
1165 /*
1166 * Create a handler thread when a thread function is supplied
1167 * and the interrupt does not nest into another interrupt
1168 * thread.
1169 */
1170 if (new->thread_fn && !nested) {
1171 ret = setup_irq_thread(new, irq, false);
1172 if (ret)
1173 goto out_mput;
1174 if (new->secondary) {
1175 ret = setup_irq_thread(new->secondary, irq, true);
1176 if (ret)
1177 goto out_thread;
1178 }
1179 }
1180
1181 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1182 ret = -ENOMEM;
1183 goto out_thread;
1184 }
1185
1186 /*
1187 * Drivers are often written to work w/o knowledge about the
1188 * underlying irq chip implementation, so a request for a
1189 * threaded irq without a primary hard irq context handler
1190 * requires the ONESHOT flag to be set. Some irq chips like
1191 * MSI based interrupts are per se one shot safe. Check the
1192 * chip flags, so we can avoid the unmask dance at the end of
1193 * the threaded handler for those.
1194 */
1195 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1196 new->flags &= ~IRQF_ONESHOT;
1197
1198 /*
1199 * The following block of code has to be executed atomically
1200 */
1201 raw_spin_lock_irqsave(&desc->lock, flags);
1202 old_ptr = &desc->action;
1203 old = *old_ptr;
1204 if (old) {
1205 /*
1206 * Can't share interrupts unless both agree to and are
1207 * the same type (level, edge, polarity). So both flag
1208 * fields must have IRQF_SHARED set and the bits which
1209 * set the trigger type must match. Also all must
1210 * agree on ONESHOT.
1211 */
1212 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1213 ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
1214 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1215 goto mismatch;
1216
1217 /* All handlers must agree on per-cpuness */
1218 if ((old->flags & IRQF_PERCPU) !=
1219 (new->flags & IRQF_PERCPU))
1220 goto mismatch;
1221
1222 /* add new interrupt at end of irq queue */
1223 do {
1224 /*
1225 * Or all existing action->thread_mask bits,
1226 * so we can find the next zero bit for this
1227 * new action.
1228 */
1229 thread_mask |= old->thread_mask;
1230 old_ptr = &old->next;
1231 old = *old_ptr;
1232 } while (old);
1233 shared = 1;
1234 }
1235
1236 /*
1237 * Setup the thread mask for this irqaction for ONESHOT. For
1238 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1239 * conditional in irq_wake_thread().
1240 */
1241 if (new->flags & IRQF_ONESHOT) {
1242 /*
1243 * Unlikely to have 32 (64 on 64-bit) irqs sharing one line,
1244 * but who knows.
1245 */
1246 if (thread_mask == ~0UL) {
1247 ret = -EBUSY;
1248 goto out_mask;
1249 }
1250 /*
1251 * The thread_mask for the action is or'ed to
1252 * desc->thread_active to indicate that the
1253 * IRQF_ONESHOT thread handler has been woken, but not
1254 * yet finished. The bit is cleared when a thread
1255 * completes. When all threads of a shared interrupt
1256 * line have completed desc->threads_active becomes
1257 * zero and the interrupt line is unmasked. See
1258 * handle.c:__irq_wake_thread() for further information.
1259 *
1260 * If no thread is woken by primary (hard irq context)
1261 * interrupt handlers, then desc->threads_active is
1262 * also checked for zero to unmask the irq line in the
1263 * affected hard irq flow handlers
1264 * (handle_[fasteoi|level]_irq).
1265 *
1266 * The new action gets the first zero bit of
1267 * thread_mask assigned. See the loop above which or's
1268 * all existing action->thread_mask bits.
1269 */
1270 new->thread_mask = 1 << ffz(thread_mask);
1271
1272 } else if (new->handler == irq_default_primary_handler &&
1273 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1274 /*
1275 * The interrupt was requested with handler = NULL, so
1276 * we use the default primary handler for it. But it
1277 * does not have the oneshot flag set. In combination
1278 * with level interrupts this is deadly, because the
1279 * default primary handler just wakes the thread, then
1280 * the irq line is re-enabled, but the device still
1281 * has the level irq asserted. Rinse and repeat....
1282 *
1283 * While this works for edge type interrupts, we play
1284 * it safe and reject unconditionally because we can't
1285 * say for sure which type this interrupt really
1286 * has. The type flags are unreliable as the
1287 * underlying chip implementation can override them.
1288 */
1289 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1290 irq);
1291 ret = -EINVAL;
1292 goto out_mask;
1293 }
1294
1295 if (!shared) {
1296 ret = irq_request_resources(desc);
1297 if (ret) {
1298 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1299 new->name, irq, desc->irq_data.chip->name);
1300 goto out_mask;
1301 }
1302
1303 init_waitqueue_head(&desc->wait_for_threads);
1304
1305 /* Setup the type (level, edge polarity) if configured: */
1306 if (new->flags & IRQF_TRIGGER_MASK) {
1307 ret = __irq_set_trigger(desc,
1308 new->flags & IRQF_TRIGGER_MASK);
1309
1310 if (ret)
1311 goto out_mask;
1312 }
1313
1314 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1315 IRQS_ONESHOT | IRQS_WAITING);
1316 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1317
1318 if (new->flags & IRQF_PERCPU) {
1319 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1320 irq_settings_set_per_cpu(desc);
1321 }
1322
1323 if (new->flags & IRQF_ONESHOT)
1324 desc->istate |= IRQS_ONESHOT;
1325
1326 if (irq_settings_can_autoenable(desc))
1327 irq_startup(desc, true);
1328 else
1329 /* Undo nested disables: */
1330 desc->depth = 1;
1331
1332 /* Exclude IRQ from balancing if requested */
1333 if (new->flags & IRQF_NOBALANCING) {
1334 irq_settings_set_no_balancing(desc);
1335 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1336 }
1337
1338 /* Set default affinity mask once everything is setup */
1339 setup_affinity(desc, mask);
1340
1341 } else if (new->flags & IRQF_TRIGGER_MASK) {
1342 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1343 unsigned int omsk = irq_settings_get_trigger_mask(desc);
1344
1345 if (nmsk != omsk)
1346 /* hope the handler works with current trigger mode */
1347 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1348 irq, omsk, nmsk);
1349 }
1350
1351 *old_ptr = new;
1352
1353 irq_pm_install_action(desc, new);
1354
1355 /* Reset broken irq detection when installing new handler */
1356 desc->irq_count = 0;
1357 desc->irqs_unhandled = 0;
1358
1359 /*
1360 * Check whether we disabled the irq via the spurious handler
1361 * before. Reenable it and give it another chance.
1362 */
1363 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1364 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1365 __enable_irq(desc);
1366 }
1367
1368 raw_spin_unlock_irqrestore(&desc->lock, flags);
1369
1370 /*
1371 * Strictly no need to wake it up, but hung_task complains
1372 * when no hard interrupt wakes the thread up.
1373 */
1374 if (new->thread)
1375 wake_up_process(new->thread);
1376 if (new->secondary)
1377 wake_up_process(new->secondary->thread);
1378
1379 register_irq_proc(irq, desc);
1380 new->dir = NULL;
1381 register_handler_proc(irq, new);
1382 free_cpumask_var(mask);
1383
1384 return 0;
1385
1386 mismatch:
1387 if (!(new->flags & IRQF_PROBE_SHARED)) {
1388 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1389 irq, new->flags, new->name, old->flags, old->name);
1390 #ifdef CONFIG_DEBUG_SHIRQ
1391 dump_stack();
1392 #endif
1393 }
1394 ret = -EBUSY;
1395
1396 out_mask:
1397 raw_spin_unlock_irqrestore(&desc->lock, flags);
1398 free_cpumask_var(mask);
1399
1400 out_thread:
1401 if (new->thread) {
1402 struct task_struct *t = new->thread;
1403
1404 new->thread = NULL;
1405 kthread_stop(t);
1406 put_task_struct(t);
1407 }
1408 if (new->secondary && new->secondary->thread) {
1409 struct task_struct *t = new->secondary->thread;
1410
1411 new->secondary->thread = NULL;
1412 kthread_stop(t);
1413 put_task_struct(t);
1414 }
1415 out_mput:
1416 module_put(desc->owner);
1417 return ret;
1418 }
1419
1420 /**
1421 * setup_irq - setup an interrupt
1422 * @irq: Interrupt line to setup
1423 * @act: irqaction for the interrupt
1424 *
1425 * Used to statically setup interrupts in the early boot process.
1426 */
1427 int setup_irq(unsigned int irq, struct irqaction *act)
1428 {
1429 int retval;
1430 struct irq_desc *desc = irq_to_desc(irq);
1431
1432 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1433 return -EINVAL;
1434
1435 retval = irq_chip_pm_get(&desc->irq_data);
1436 if (retval < 0)
1437 return retval;
1438
1439 chip_bus_lock(desc);
1440 retval = __setup_irq(irq, desc, act);
1441 chip_bus_sync_unlock(desc);
1442
1443 if (retval)
1444 irq_chip_pm_put(&desc->irq_data);
1445
1446 return retval;
1447 }
1448 EXPORT_SYMBOL_GPL(setup_irq);
1449
1450 /*
1451 * Internal function to unregister an irqaction - used to free
1452 * regular and special interrupts that are part of the architecture.
1453 */
1454 static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1455 {
1456 struct irq_desc *desc = irq_to_desc(irq);
1457 struct irqaction *action, **action_ptr;
1458 unsigned long flags;
1459
1460 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1461
1462 if (!desc)
1463 return NULL;
1464
1465 chip_bus_lock(desc);
1466 raw_spin_lock_irqsave(&desc->lock, flags);
1467
1468 /*
1469 * There can be multiple actions per IRQ descriptor, find the right
1470 * one based on the dev_id:
1471 */
1472 action_ptr = &desc->action;
1473 for (;;) {
1474 action = *action_ptr;
1475
1476 if (!action) {
1477 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1478 raw_spin_unlock_irqrestore(&desc->lock, flags);
1479 chip_bus_sync_unlock(desc);
1480 return NULL;
1481 }
1482
1483 if (action->dev_id == dev_id)
1484 break;
1485 action_ptr = &action->next;
1486 }
1487
1488 /* Found it - now remove it from the list of entries: */
1489 *action_ptr = action->next;
1490
1491 irq_pm_remove_action(desc, action);
1492
1493 /* If this was the last handler, shut down the IRQ line: */
1494 if (!desc->action) {
1495 irq_settings_clr_disable_unlazy(desc);
1496 irq_shutdown(desc);
1497 irq_release_resources(desc);
1498 }
1499
1500 #ifdef CONFIG_SMP
1501 /* make sure affinity_hint is cleaned up */
1502 if (WARN_ON_ONCE(desc->affinity_hint))
1503 desc->affinity_hint = NULL;
1504 #endif
1505
1506 raw_spin_unlock_irqrestore(&desc->lock, flags);
1507 chip_bus_sync_unlock(desc);
1508
1509 unregister_handler_proc(irq, action);
1510
1511 /* Make sure it's not being used on another CPU: */
1512 synchronize_irq(irq);
1513
1514 #ifdef CONFIG_DEBUG_SHIRQ
1515 /*
1516 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1517 * event to happen even while it is being freed, so let's make sure
1518 * that is so by doing an extra call to the handler ....
1519 *
1520 * ( We do this after actually deregistering it, to make sure that a
1521 * 'real' IRQ doesn't run in parallel with our fake. )
1522 */
1523 if (action->flags & IRQF_SHARED) {
1524 local_irq_save(flags);
1525 action->handler(irq, dev_id);
1526 local_irq_restore(flags);
1527 }
1528 #endif
1529
1530 if (action->thread) {
1531 kthread_stop(action->thread);
1532 put_task_struct(action->thread);
1533 if (action->secondary && action->secondary->thread) {
1534 kthread_stop(action->secondary->thread);
1535 put_task_struct(action->secondary->thread);
1536 }
1537 }
1538
1539 irq_chip_pm_put(&desc->irq_data);
1540 module_put(desc->owner);
1541 kfree(action->secondary);
1542 return action;
1543 }
1544
1545 /**
1546 * remove_irq - free an interrupt
1547 * @irq: Interrupt line to free
1548 * @act: irqaction for the interrupt
1549 *
1550 * Used to remove interrupts statically setup by the early boot process.
1551 */
1552 void remove_irq(unsigned int irq, struct irqaction *act)
1553 {
1554 struct irq_desc *desc = irq_to_desc(irq);
1555
1556 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1557 __free_irq(irq, act->dev_id);
1558 }
1559 EXPORT_SYMBOL_GPL(remove_irq);
1560
1561 /**
1562 * free_irq - free an interrupt allocated with request_irq
1563 * @irq: Interrupt line to free
1564 * @dev_id: Device identity to free
1565 *
1566 * Remove an interrupt handler. The handler is removed and if the
1567 * interrupt line is no longer in use by any driver it is disabled.
1568 * On a shared IRQ the caller must ensure the interrupt is disabled
1569 * on the card it drives before calling this function. The function
1570 * does not return until any executing interrupts for this IRQ
1571 * have completed.
1572 *
1573 * This function must not be called from interrupt context.
1574 */
1575 void free_irq(unsigned int irq, void *dev_id)
1576 {
1577 struct irq_desc *desc = irq_to_desc(irq);
1578
1579 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1580 return;
1581
1582 #ifdef CONFIG_SMP
1583 if (WARN_ON(desc->affinity_notify))
1584 desc->affinity_notify = NULL;
1585 #endif
1586
1587 kfree(__free_irq(irq, dev_id));
1588 }
1589 EXPORT_SYMBOL(free_irq);
1590
1591 /**
1592 * request_threaded_irq - allocate an interrupt line
1593 * @irq: Interrupt line to allocate
1594 * @handler: Function to be called when the IRQ occurs.
1595 * Primary handler for threaded interrupts
1596 * If NULL and thread_fn != NULL the default
1597 * primary handler is installed
1598 * @thread_fn: Function called from the irq handler thread
1599 * If NULL, no irq thread is created
1600 * @irqflags: Interrupt type flags
1601 * @devname: An ascii name for the claiming device
1602 * @dev_id: A cookie passed back to the handler function
1603 *
1604 * This call allocates interrupt resources and enables the
1605 * interrupt line and IRQ handling. From the point this
1606 * call is made your handler function may be invoked. Since
1607 * your handler function must clear any interrupt the board
1608 * raises, you must take care both to initialise your hardware
1609 * and to set up the interrupt handler in the right order.
1610 *
1611 * If you want to set up a threaded irq handler for your device
1612 * then you need to supply @handler and @thread_fn. @handler is
1613 * still called in hard interrupt context and has to check
1614 * whether the interrupt originates from the device. If yes it
1615 * needs to disable the interrupt on the device and return
1616 * IRQ_WAKE_THREAD which will wake up the handler thread and run
1617 * @thread_fn. This split handler design is necessary to support
1618 * shared interrupts.
1619 *
1620 * Dev_id must be globally unique. Normally the address of the
1621 * device data structure is used as the cookie. Since the handler
1622 * receives this value it makes sense to use it.
1623 *
1624 * If your interrupt is shared you must pass a non NULL dev_id
1625 * as this is required when freeing the interrupt.
1626 *
1627 * Flags:
1628 *
1629 * IRQF_SHARED Interrupt is shared
1630 * IRQF_TRIGGER_* Specify active edge(s) or level
1631 *
1632 */
1633 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1634 irq_handler_t thread_fn, unsigned long irqflags,
1635 const char *devname, void *dev_id)
1636 {
1637 struct irqaction *action;
1638 struct irq_desc *desc;
1639 int retval;
1640
1641 if (irq == IRQ_NOTCONNECTED)
1642 return -ENOTCONN;
1643
1644 /*
1645 * Sanity-check: shared interrupts must pass in a real dev-ID,
1646 * otherwise we'll have trouble later trying to figure out
1647 * which interrupt is which (messes up the interrupt freeing
1648 * logic etc).
1649 *
1650 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1651 * it cannot be set along with IRQF_NO_SUSPEND.
1652 */
1653 if (((irqflags & IRQF_SHARED) && !dev_id) ||
1654 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1655 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1656 return -EINVAL;
1657
1658 desc = irq_to_desc(irq);
1659 if (!desc)
1660 return -EINVAL;
1661
1662 if (!irq_settings_can_request(desc) ||
1663 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1664 return -EINVAL;
1665
1666 if (!handler) {
1667 if (!thread_fn)
1668 return -EINVAL;
1669 handler = irq_default_primary_handler;
1670 }
1671
1672 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1673 if (!action)
1674 return -ENOMEM;
1675
1676 action->handler = handler;
1677 action->thread_fn = thread_fn;
1678 action->flags = irqflags;
1679 action->name = devname;
1680 action->dev_id = dev_id;
1681
1682 retval = irq_chip_pm_get(&desc->irq_data);
1683 if (retval < 0) {
1684 kfree(action);
1685 return retval;
1686 }
1687
1688 chip_bus_lock(desc);
1689 retval = __setup_irq(irq, desc, action);
1690 chip_bus_sync_unlock(desc);
1691
1692 if (retval) {
1693 irq_chip_pm_put(&desc->irq_data);
1694 kfree(action->secondary);
1695 kfree(action);
1696 }
1697
1698 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1699 if (!retval && (irqflags & IRQF_SHARED)) {
1700 /*
1701 * It's a shared IRQ -- the driver ought to be prepared for it
1702 * to happen immediately, so let's make sure....
1703 * We disable the irq to make sure that a 'real' IRQ doesn't
1704 * run in parallel with our fake.
1705 */
1706 unsigned long flags;
1707
1708 disable_irq(irq);
1709 local_irq_save(flags);
1710
1711 handler(irq, dev_id);
1712
1713 local_irq_restore(flags);
1714 enable_irq(irq);
1715 }
1716 #endif
1717 return retval;
1718 }
1719 EXPORT_SYMBOL(request_threaded_irq);
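/*
 * Usage sketch (illustrative) of the split handler design described
 * above. my_dev and the MY_* register names are hypothetical.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		if (!(readl(dev->regs + MY_STATUS) & MY_IRQ_PENDING))
 *			return IRQ_NONE;	// not ours; the line is shared
 *		writel(0, dev->regs + MY_IRQ_ENABLE);	// quiet the device
 *		return IRQ_WAKE_THREAD;		// run my_slow_work()
 *	}
 *
 *	static irqreturn_t my_slow_work(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		// may sleep here: talk to the device over a slow bus
 *		writel(MY_IRQ_PENDING, dev->regs + MY_IRQ_ENABLE);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(dev->irq, my_quick_check, my_slow_work,
 *				   IRQF_SHARED, "my_dev", dev);
 */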
1720
1721 /**
1722 * request_any_context_irq - allocate an interrupt line
1723 * @irq: Interrupt line to allocate
1724 * @handler: Function to be called when the IRQ occurs.
1725 * Threaded handler for threaded interrupts.
1726 * @flags: Interrupt type flags
1727 * @name: An ascii name for the claiming device
1728 * @dev_id: A cookie passed back to the handler function
1729 *
1730 * This call allocates interrupt resources and enables the
1731 * interrupt line and IRQ handling. It selects either a
1732 * hardirq or threaded handling method depending on the
1733 * context.
1734 *
1735 * On failure, it returns a negative value. On success,
1736 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1737 */
1738 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1739 unsigned long flags, const char *name, void *dev_id)
1740 {
1741 struct irq_desc *desc;
1742 int ret;
1743
1744 if (irq == IRQ_NOTCONNECTED)
1745 return -ENOTCONN;
1746
1747 desc = irq_to_desc(irq);
1748 if (!desc)
1749 return -EINVAL;
1750
1751 if (irq_settings_is_nested_thread(desc)) {
1752 ret = request_threaded_irq(irq, NULL, handler,
1753 flags, name, dev_id);
1754 return !ret ? IRQC_IS_NESTED : ret;
1755 }
1756
1757 ret = request_irq(irq, handler, flags, name, dev_id);
1758 return !ret ? IRQC_IS_HARDIRQ : ret;
1759 }
1760 EXPORT_SYMBOL_GPL(request_any_context_irq);
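/*
 * Usage sketch (illustrative): callers that may sit behind a nested
 * irqchip (e.g. a gpio expander on i2c) act on the context chosen.
 * dev and my_handler() are hypothetical.
 *
 *	ret = request_any_context_irq(dev->irq, my_handler,
 *				      IRQF_TRIGGER_FALLING, "my_button", dev);
 *	if (ret < 0)
 *		return ret;
 *	dev->handler_is_threaded = (ret == IRQC_IS_NESTED);
 */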
1761
1762 void enable_percpu_irq(unsigned int irq, unsigned int type)
1763 {
1764 unsigned int cpu = smp_processor_id();
1765 unsigned long flags;
1766 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1767
1768 if (!desc)
1769 return;
1770
1771 /*
1772 * If the trigger type is not specified by the caller, then
1773 * use the default for this interrupt.
1774 */
1775 type &= IRQ_TYPE_SENSE_MASK;
1776 if (type == IRQ_TYPE_NONE)
1777 type = irqd_get_trigger_type(&desc->irq_data);
1778
1779 if (type != IRQ_TYPE_NONE) {
1780 int ret;
1781
1782 ret = __irq_set_trigger(desc, type);
1783
1784 if (ret) {
1785 WARN(1, "failed to set type for IRQ%d\n", irq);
1786 goto out;
1787 }
1788 }
1789
1790 irq_percpu_enable(desc, cpu);
1791 out:
1792 irq_put_desc_unlock(desc, flags);
1793 }
1794 EXPORT_SYMBOL_GPL(enable_percpu_irq);
1795
1796 /**
1797 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
1798 * @irq: Linux irq number to check for
1799 *
1800 * Must be called from a non-migratable context. Returns the enable
1801 * state of a per cpu interrupt on the current cpu.
1802 */
1803 bool irq_percpu_is_enabled(unsigned int irq)
1804 {
1805 unsigned int cpu = smp_processor_id();
1806 struct irq_desc *desc;
1807 unsigned long flags;
1808 bool is_enabled;
1809
1810 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1811 if (!desc)
1812 return false;
1813
1814 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1815 irq_put_desc_unlock(desc, flags);
1816
1817 return is_enabled;
1818 }
1819 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
1820
1821 void disable_percpu_irq(unsigned int irq)
1822 {
1823 unsigned int cpu = smp_processor_id();
1824 unsigned long flags;
1825 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1826
1827 if (!desc)
1828 return;
1829
1830 irq_percpu_disable(desc, cpu);
1831 irq_put_desc_unlock(desc, flags);
1832 }
1833 EXPORT_SYMBOL_GPL(disable_percpu_irq);
1834
1835 /*
1836 * Internal function to unregister a percpu irqaction.
1837 */
1838 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1839 {
1840 struct irq_desc *desc = irq_to_desc(irq);
1841 struct irqaction *action;
1842 unsigned long flags;
1843
1844 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1845
1846 if (!desc)
1847 return NULL;
1848
1849 raw_spin_lock_irqsave(&desc->lock, flags);
1850
1851 action = desc->action;
1852 if (!action || action->percpu_dev_id != dev_id) {
1853 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1854 goto bad;
1855 }
1856
1857 if (!cpumask_empty(desc->percpu_enabled)) {
1858 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1859 irq, cpumask_first(desc->percpu_enabled));
1860 goto bad;
1861 }
1862
1863 /* Found it - now remove it from the list of entries: */
1864 desc->action = NULL;
1865
1866 raw_spin_unlock_irqrestore(&desc->lock, flags);
1867
1868 unregister_handler_proc(irq, action);
1869
1870 irq_chip_pm_put(&desc->irq_data);
1871 module_put(desc->owner);
1872 return action;
1873
1874 bad:
1875 raw_spin_unlock_irqrestore(&desc->lock, flags);
1876 return NULL;
1877 }
1878
1879 /**
1880 * remove_percpu_irq - free a per-cpu interrupt
1881 * @irq: Interrupt line to free
1882 * @act: irqaction for the interrupt
1883 *
1884 * Used to remove interrupts statically setup by the early boot process.
1885 */
1886 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1887 {
1888 struct irq_desc *desc = irq_to_desc(irq);
1889
1890 if (desc && irq_settings_is_per_cpu_devid(desc))
1891 __free_percpu_irq(irq, act->percpu_dev_id);
1892 }
1893
1894 /**
1895 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
1896 * @irq: Interrupt line to free
1897 * @dev_id: Device identity to free
1898 *
1899 * Remove a percpu interrupt handler. The handler is removed, but
1900 * the interrupt line is not disabled. This must be done on each
1901 * CPU before calling this function. The function does not return
1902 * until any executing interrupts for this IRQ have completed.
1903 *
1904 * This function must not be called from interrupt context.
1905 */
1906 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1907 {
1908 struct irq_desc *desc = irq_to_desc(irq);
1909
1910 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1911 return;
1912
1913 chip_bus_lock(desc);
1914 kfree(__free_percpu_irq(irq, dev_id));
1915 chip_bus_sync_unlock(desc);
1916 }
1917 EXPORT_SYMBOL_GPL(free_percpu_irq);
1918
1919 /**
1920 * setup_percpu_irq - setup a per-cpu interrupt
1921 * @irq: Interrupt line to setup
1922 * @act: irqaction for the interrupt
1923 *
1924 * Used to statically setup per-cpu interrupts in the early boot process.
1925 */
1926 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1927 {
1928 struct irq_desc *desc = irq_to_desc(irq);
1929 int retval;
1930
1931 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1932 return -EINVAL;
1933
1934 retval = irq_chip_pm_get(&desc->irq_data);
1935 if (retval < 0)
1936 return retval;
1937
1938 chip_bus_lock(desc);
1939 retval = __setup_irq(irq, desc, act);
1940 chip_bus_sync_unlock(desc);
1941
1942 if (retval)
1943 irq_chip_pm_put(&desc->irq_data);
1944
1945 return retval;
1946 }
1947
1948 /**
1949 * request_percpu_irq - allocate a percpu interrupt line
1950 * @irq: Interrupt line to allocate
1951 * @handler: Function to be called when the IRQ occurs.
1952 * @devname: An ascii name for the claiming device
1953 * @dev_id: A percpu cookie passed back to the handler function
1954 *
1955 * This call allocates interrupt resources and enables the
1956 * interrupt on the local CPU. If the interrupt is supposed to be
1957 * enabled on other CPUs, it has to be done on each CPU using
1958 * enable_percpu_irq().
1959 *
1960 * Dev_id must be globally unique. It is a per-cpu variable, and
1961 * the handler gets called with the interrupted CPU's instance of
1962 * that variable.
1963 */
1964 int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1965 const char *devname, void __percpu *dev_id)
1966 {
1967 struct irqaction *action;
1968 struct irq_desc *desc;
1969 int retval;
1970
1971 if (!dev_id)
1972 return -EINVAL;
1973
1974 desc = irq_to_desc(irq);
1975 if (!desc || !irq_settings_can_request(desc) ||
1976 !irq_settings_is_per_cpu_devid(desc))
1977 return -EINVAL;
1978
1979 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1980 if (!action)
1981 return -ENOMEM;
1982
1983 action->handler = handler;
1984 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1985 action->name = devname;
1986 action->percpu_dev_id = dev_id;
1987
1988 retval = irq_chip_pm_get(&desc->irq_data);
1989 if (retval < 0) {
1990 kfree(action);
1991 return retval;
1992 }
1993
1994 chip_bus_lock(desc);
1995 retval = __setup_irq(irq, desc, action);
1996 chip_bus_sync_unlock(desc);
1997
1998 if (retval) {
1999 irq_chip_pm_put(&desc->irq_data);
2000 kfree(action);
2001 }
2002
2003 return retval;
2004 }
2005 EXPORT_SYMBOL_GPL(request_percpu_irq);
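/*
 * Usage sketch (illustrative), modelled on per-cpu timer interrupts.
 * The __percpu cookie tells the instances apart; my_evt and
 * my_percpu_handler() are hypothetical.
 *
 *	static DEFINE_PER_CPU(struct my_evt, my_evt);
 *
 *	ret = request_percpu_irq(irq, my_percpu_handler, "my_timer", &my_evt);
 *
 *	// later, on every CPU that should receive the interrupt:
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */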
2006
2007 /**
2008 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
2009 * @irq: Interrupt line that is forwarded to a VM
2010 * @which: One of IRQCHIP_STATE_* the caller wants to know about
2011 * @state: a pointer to a boolean where the state is to be stored
2012 *
2013 * This call snapshots the internal irqchip state of an
2014 * interrupt, returning into @state the bit corresponding to
2015 * state @which.
2016 *
2017 * This function should be called with preemption disabled if the
2018 * interrupt controller has per-cpu registers.
2019 */
2020 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2021 bool *state)
2022 {
2023 struct irq_desc *desc;
2024 struct irq_data *data;
2025 struct irq_chip *chip;
2026 unsigned long flags;
2027 int err = -EINVAL;
2028
2029 desc = irq_get_desc_buslock(irq, &flags, 0);
2030 if (!desc)
2031 return err;
2032
2033 data = irq_desc_get_irq_data(desc);
2034
2035 do {
2036 chip = irq_data_get_irq_chip(data);
2037 if (chip->irq_get_irqchip_state)
2038 break;
2039 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2040 data = data->parent_data;
2041 #else
2042 data = NULL;
2043 #endif
2044 } while (data);
2045
2046 if (data)
2047 err = chip->irq_get_irqchip_state(data, which, state);
2048
2049 irq_put_desc_busunlock(desc, flags);
2050 return err;
2051 }
2052 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
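/*
 * Usage sketch (illustrative): a hypervisor saving the pending state of
 * an interrupt forwarded to a guest. Preemption is disabled because the
 * backing registers may be per-cpu.
 *
 *	bool pending;
 *	int err;
 *
 *	preempt_disable();
 *	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	preempt_enable();
 */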
2053
2054 /**
2055 * irq_set_irqchip_state - set the state of a forwarded interrupt.
2056 * @irq: Interrupt line that is forwarded to a VM
2057 * @which: State to be restored (one of IRQCHIP_STATE_*)
2058 * @val: Value corresponding to @which
2059 *
2060 * This call sets the internal irqchip state of an interrupt,
2061 * depending on the value of @which.
2062 *
2063 * This function should be called with preemption disabled if the
2064 * interrupt controller has per-cpu registers.
2065 */
2066 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2067 bool val)
2068 {
2069 struct irq_desc *desc;
2070 struct irq_data *data;
2071 struct irq_chip *chip;
2072 unsigned long flags;
2073 int err = -EINVAL;
2074
2075 desc = irq_get_desc_buslock(irq, &flags, 0);
2076 if (!desc)
2077 return err;
2078
2079 data = irq_desc_get_irq_data(desc);
2080
2081 do {
2082 chip = irq_data_get_irq_chip(data);
2083 if (chip->irq_set_irqchip_state)
2084 break;
2085 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2086 data = data->parent_data;
2087 #else
2088 data = NULL;
2089 #endif
2090 } while (data);
2091
2092 if (data)
2093 err = chip->irq_set_irqchip_state(data, which, val);
2094
2095 irq_put_desc_busunlock(desc, flags);
2096 return err;
2097 }
2098 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);