/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * For 2.4.x compatibility, 2.4.x can use
 *
 *	typedef void irqreturn_t;
 *	#define IRQ_NONE
 *	#define IRQ_HANDLED
 *	#define IRQ_RETVAL(x)
 *
 * to mix old-style and new-style irq handler returns.
 *
 * IRQ_NONE means we didn't handle the interrupt.
 * IRQ_HANDLED means we did have a valid interrupt and handled it.
 * IRQ_RETVAL(x) selects between the two: IRQ_HANDLED if x is non-zero,
 * IRQ_NONE otherwise.
 */
typedef int irqreturn_t;

#define IRQ_NONE	(0)
#define IRQ_HANDLED	(1)
#define IRQ_RETVAL(x)	((x) != 0)

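/*
 * Usage sketch (not part of this header; the foo_* names are
 * hypothetical): a handler that tells the core whether the interrupt
 * really came from its device, which matters on shared lines.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id,
 *					 struct pt_regs *regs)
 *	{
 *		struct foo_device *foo = dev_id;
 *		int handled = foo_ack_irq(foo);	- non-zero if it was ours
 *
 *		return IRQ_RETVAL(handled);
 *	}
 */
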
struct irqaction {
	irqreturn_t (*handler)(int, void *, struct pt_regs *);
	unsigned long flags;		/* SA_* flags from request_irq() */
	cpumask_t mask;
	const char *name;		/* shown in /proc/interrupts */
	void *dev_id;			/* cookie passed back to the handler */
	struct irqaction *next;		/* chain of actions on a shared irq */
	int irq;
	struct proc_dir_entry *dir;	/* /proc/irq/<n> entry */
};

extern irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs);
extern int request_irq(unsigned int,
		       irqreturn_t (*handler)(int, void *, struct pt_regs *),
		       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);
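
/*
 * Usage sketch (hypothetical foo_* names): registering and releasing
 * the handler above.  SA_SHIRQ marks the line shareable; dev_id must
 * then be unique so free_irq() can pick out this action.
 *
 *	err = request_irq(foo->irq, foo_interrupt, SA_SHIRQ, "foo", foo);
 *	if (err)
 *		return err;		- negative errno on failure
 *	...
 *	free_irq(foo->irq, foo);	- same dev_id as at request time
 */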


#ifdef CONFIG_GENERIC_HARDIRQS
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
#endif
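
/*
 * Usage sketch (assumed semantics): disable_irq() additionally waits
 * for a handler currently running on the line to complete, while
 * disable_irq_nosync() returns immediately.  Calls nest, so every
 * disable must be balanced by an enable.  foo_* names are hypothetical.
 *
 *	disable_irq(foo->irq);
 *	foo_reprogram(foo);	- the line is quiet here
 *	enable_irq(foo->irq);
 */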

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/*
 * Temporary defines for UP kernels, until all code gets fixed.
 */
#ifndef CONFIG_SMP
static inline void __deprecated cli(void)
{
	local_irq_disable();
}
static inline void __deprecated sti(void)
{
	local_irq_enable();
}
static inline void __deprecated save_flags(unsigned long *x)
{
	local_save_flags(*x);
}
#define save_flags(x) save_flags(&x)
static inline void __deprecated restore_flags(unsigned long x)
{
	local_irq_restore(x);
}

static inline void __deprecated save_and_cli(unsigned long *x)
{
	local_irq_save(*x);
}
#define save_and_cli(x) save_and_cli(&x)
#endif /* !CONFIG_SMP */
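
/*
 * Migration sketch: new code should use the local-CPU primitives
 * directly instead of the deprecated wrappers above, e.g.:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		- was: save_and_cli(flags);
 *	... critical section ...
 *	local_irq_restore(flags);	- was: restore_flags(flags);
 */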

/* SoftIRQ primitives. */
#define local_bh_disable() \
		do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
#define __local_bh_enable() \
		do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)

extern void local_bh_enable(void);
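
/*
 * Usage sketch: bracket code that touches data shared with softirq
 * (or tasklet) context on this CPU.  foo_* names are hypothetical.
 *
 *	local_bh_disable();	- softirqs won't run on this CPU ...
 *	foo_update_stats(foo);
 *	local_bh_enable();	- ... until here; pending ones then run
 */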

/* Please do not add new softirqs unless you really need very high
   frequency threaded job scheduling.  For almost all purposes
   tasklets are more than enough; e.g. all serial device BHs et
   al. should be converted to tasklets, not softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	TASKLET_SOFTIRQ
};

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
	void	*data;
};

asmlinkage void do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
extern void FASTCALL(raise_softirq(unsigned int nr));
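
/*
 * Usage sketch: how a subsystem wires up and triggers its softirq.
 * NET_TX_SOFTIRQ is real; net_tx_action() stands in for the
 * subsystem's own handler.
 *
 *	static void net_tx_action(struct softirq_action *h)
 *	{
 *		... process the per-CPU output queues ...
 *	}
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);	- at init
 *	...
 *	raise_softirq(NET_TX_SOFTIRQ);	- mark pending; run by do_softirq()
 */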


/* Tasklets --- multithreaded analogue of BHs.

   The main difference from generic softirqs: a given tasklet runs on
   only one CPU at a time.

   The main difference from BHs: different tasklets may run
   simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, the tasklet is guaranteed to be
     executed on some CPU at least once afterwards.
   * If the tasklet is already scheduled but its execution has not yet
     started, it will be executed only once.
   * If the tasklet is already running on another CPU (or if schedule is
     called from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets.  If a client needs cross-tasklet
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
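
/*
 * Usage sketch (hypothetical foo_* names): declaring a tasklet and
 * scheduling it from an interrupt handler.
 *
 *	static void foo_do_tasklet(unsigned long data)
 *	{
 *		struct foo_device *foo = (struct foo_device *)data;
 *		... deferred work, runs with interrupts enabled ...
 *	}
 *
 *	static DECLARE_TASKLET(foo_tasklet, foo_do_tasklet,
 *			       (unsigned long)&foo_dev);
 *
 *	- in the interrupt handler:
 *	tasklet_schedule(&foo_tasklet);
 */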


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
/* Try to take ownership of the tasklet for execution; non-zero on success. */
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

/* Release ownership; the barrier orders prior stores before the unlock. */
static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

/* Spin until a running instance of the tasklet has finished. */
static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));

/* Schedule the tasklet unless it is already pending. */
static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));

/* As above, but on the high-priority (HI_SOFTIRQ) tasklet list. */
static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}


/* Prevent the tasklet from running; does not wait for a running instance. */
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

/* Prevent the tasklet from running and wait for a running instance to finish. */
static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

/* Re-enable the tasklet; must balance a preceding disable. */
static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
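
/*
 * Lifecycle sketch for dynamically initialized tasklets (hypothetical
 * foo_* names):
 *
 *	tasklet_init(&foo->tasklet, foo_do_tasklet, (unsigned long)foo);
 *	...
 *	tasklet_kill(&foo->tasklet);	- on teardown: waits until the
 *					  tasklet is no longer scheduled
 *					  or running, after which it is
 *					  safe to free the structure
 */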

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows (see the sketch
 * after this comment):
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. enable interrupts (new code should use local_irq_enable(), not the
 *    deprecated sti()).
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter and returns the irq number
 * that occurred, zero if none occurred, or a negative irq number if more
 * than one irq occurred.
 */
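
/*
 * Probing sketch following the recipe above (foo_* helpers are
 * hypothetical, mdelay() is from <linux/delay.h>):
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	foo_mask_irq(foo);		- step 1: quiesce the device
 *	irqs = probe_irq_on();		- step 3: take over idle IRQs
 *	foo_trigger_irq(foo);		- step 4: make the device interrupt
 *	mdelay(10);			- step 5: give it time to arrive
 *	irq = probe_irq_off(irqs);	- step 6: 0=none, <0=multiple
 *	foo_ack_irq(foo);		- step 7: clear the pending irq
 *	if (irq <= 0)
 *		return -ENODEV;		- probe failed or was ambiguous
 */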

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#endif /* _LINUX_INTERRUPT_H */