x86: modify write_ldt function
[deliverable/linux.git] arch/x86/kernel/i8259_32.c
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/timer.h>
#include <asm/pgtable.h>
#include <asm/delay.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/arch_hooks.h>
#include <asm/i8259.h>

/*
 * This is the 'legacy' 8259A Programmable Interrupt Controller,
 * present in the majority of PC/AT boxes, plus some generic x86
 * specific things, if generic specifics make any sense at all.
 * This file should become arch/i386/kernel/irq.c when the old irq.c
 * moves to arch-independent land.
 */

static int i8259A_auto_eoi;
DEFINE_SPINLOCK(i8259A_lock);
static void mask_and_ack_8259A(unsigned int);

static struct irq_chip i8259A_chip = {
	.name		= "XT-PIC",
	.mask		= disable_8259A_irq,
	.disable	= disable_8259A_irq,
	.unmask		= enable_8259A_irq,
	.mask_ack	= mask_and_ack_8259A,
};
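/*
 * The flow handler installed for the ISA lines below is
 * handle_level_irq(), which invokes ->mask_ack when an interrupt
 * arrives and ->unmask once the action handlers have run (unless the
 * line was disabled in the meantime).  Acknowledging the PIC therefore
 * normally happens in mask_and_ack_8259A(), or by plain masking when
 * the controllers run in auto-EOI mode, see init_8259A().
 */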

/*
 * 8259A PIC functions to handle ISA devices:
 */

/*
 * This contains the irq mask for both 8259A irq controllers.
 */
unsigned int cached_irq_mask = 0xffff;
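/*
 * cached_master_mask and cached_slave_mask (defined in <asm/i8259.h>)
 * alias the low and high byte of this word, so every update of
 * cached_irq_mask keeps the per-controller copies written to the two
 * IMRs in sync.  The initial 0xffff leaves all 16 lines masked.
 */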

/*
 * Not all IRQs can be routed through the IO-APIC, e.g. on certain (older)
 * boards the timer interrupt is not really connected to any IO-APIC pin,
 * it's fed to the master 8259A's IR0 line only.
 *
 * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
 * This 'mixed mode' IRQ handling costs nothing because it's only used
 * at IRQ setup time.
 */
unsigned long io_apic_irqs;

void disable_8259A_irq(unsigned int irq)
{
	unsigned int mask = 1 << irq;
	unsigned long flags;

	spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask |= mask;
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	spin_unlock_irqrestore(&i8259A_lock, flags);
}

void enable_8259A_irq(unsigned int irq)
{
	unsigned int mask = ~(1 << irq);
	unsigned long flags;

	spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask &= mask;
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	spin_unlock_irqrestore(&i8259A_lock, flags);
}

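/*
 * The controllers are left in "read IRR" mode (OCW3 0x0a), so a plain
 * read from a command port below returns the Interrupt Request
 * Register: a non-zero result means the line is asserted but has not
 * been serviced yet.
 */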
int i8259A_irq_pending(unsigned int irq)
{
	unsigned int mask = 1<<irq;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&i8259A_lock, flags);
	if (irq < 8)
		ret = inb(PIC_MASTER_CMD) & mask;
	else
		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
	spin_unlock_irqrestore(&i8259A_lock, flags);

	return ret;
}

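/*
 * Hand an IRQ back to the legacy PIC: drop it from the IO-APIC routing
 * mask and (re)install the 8259A chip with the level-triggered flow
 * handler.
 */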
void make_8259A_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	io_apic_irqs &= ~(1<<irq);
	set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
				      "XT");
	enable_irq(irq);
}

/*
 * This function is expected to be called rarely: switching between the
 * 8259A registers is slow.
 * The caller must hold the irq controller spinlock.
 */
static inline int i8259A_irq_real(unsigned int irq)
{
	int value;
	int irqmask = 1<<irq;

	if (irq < 8) {
		outb(0x0B,PIC_MASTER_CMD);	/* ISR register */
		value = inb(PIC_MASTER_CMD) & irqmask;
		outb(0x0A,PIC_MASTER_CMD);	/* back to the IRR register */
		return value;
	}
	outb(0x0B,PIC_SLAVE_CMD);	/* ISR register */
	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
	outb(0x0A,PIC_SLAVE_CMD);	/* back to the IRR register */
	return value;
}
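/*
 * 0x0b and 0x0a above are OCW3 commands: they select whether a
 * subsequent read of the command port returns the In-Service Register
 * or the Interrupt Request Register.  A set ISR bit proves that the
 * controller really raised (and is servicing) the IRQ, which is what
 * distinguishes a genuine interrupt from a spurious one.
 */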

/*
 * Careful! The 8259A is a fragile beast, it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI, and the order of EOI
 * to the two 8259s is important!)
 */
static void mask_and_ack_8259A(unsigned int irq)
{
	unsigned int irqmask = 1 << irq;
	unsigned long flags;

	spin_lock_irqsave(&i8259A_lock, flags);
	/*
	 * Lightweight spurious IRQ detection. We do not want
	 * to overdo spurious IRQ handling - it's usually a sign
	 * of hardware problems, so we only do the checks we can
	 * do without slowing down good hardware unnecessarily.
	 *
	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
	 * usually resulting from the 8259A-1|2 PICs) occur
	 * even if the IRQ is masked in the 8259A. Thus we
	 * can check spurious 8259A IRQs without doing the
	 * quite slow i8259A_irq_real() call for every IRQ.
	 * This does not cover 100% of spurious interrupts,
	 * but should be enough to warn the user that there
	 * is something bad going on ...
	 */
	if (cached_irq_mask & irqmask)
		goto spurious_8259A_irq;
	cached_irq_mask |= irqmask;

handle_real_irq:
	if (irq & 8) {
		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_slave_mask, PIC_SLAVE_IMR);
		outb(0x60+(irq&7),PIC_SLAVE_CMD);	/* 'Specific EOI' to slave */
		outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD);	/* 'Specific EOI' to master-IRQ2 */
	} else {
		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_master_mask, PIC_MASTER_IMR);
		outb(0x60+irq,PIC_MASTER_CMD);	/* 'Specific EOI' to master */
	}
	spin_unlock_irqrestore(&i8259A_lock, flags);
	return;

spurious_8259A_irq:
	/*
	 * this is the slow path - should happen rarely.
	 */
	if (i8259A_irq_real(irq))
		/*
		 * oops, the IRQ _is_ in service according to the
		 * 8259A - not spurious, go handle it.
		 */
		goto handle_real_irq;

	{
		static int spurious_irq_mask;
		/*
		 * At this point we can be sure the IRQ is spurious,
		 * let's ACK and report it. [once per IRQ]
		 */
		if (!(spurious_irq_mask & irqmask)) {
			printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
			spurious_irq_mask |= irqmask;
		}
		atomic_inc(&irq_err_count);
		/*
		 * Theoretically we do not have to handle this IRQ,
		 * but in Linux this does not cause problems and is
		 * simpler for us.
		 */
		goto handle_real_irq;
	}
}
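/*
 * 0x60+irq written to a command port is the OCW2 "specific EOI"
 * command: it clears exactly the named in-service bit.  For interrupts
 * arriving through the slave, the cascade line (IRQ2) on the master
 * has to receive its own specific EOI as well, in the order used above.
 */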

static char irq_trigger[2];
/**
 * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ
 */
static void restore_ELCR(char *trigger)
{
	outb(trigger[0], 0x4d0);
	outb(trigger[1], 0x4d1);
}

static void save_ELCR(char *trigger)
{
	/* IRQ 0,1,2,8,13 are marked as reserved */
	trigger[0] = inb(0x4d0) & 0xF8;
	trigger[1] = inb(0x4d1) & 0xDE;
}
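/*
 * In the ELCR a set bit marks the corresponding IRQ as level triggered
 * (as PCI interrupts require), a clear bit as edge triggered (the ISA
 * default).  The masks above leave the reserved lines (0, 1, 2, 8 and
 * 13) alone, since their trigger mode is fixed.
 */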

static int i8259A_resume(struct sys_device *dev)
{
	init_8259A(i8259A_auto_eoi);
	restore_ELCR(irq_trigger);
	return 0;
}

static int i8259A_suspend(struct sys_device *dev, pm_message_t state)
{
	save_ELCR(irq_trigger);
	return 0;
}

static int i8259A_shutdown(struct sys_device *dev)
{
	/* Put the i8259A into a quiescent state that
	 * the kernel initialization code can get it
	 * out of.
	 */
	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
	return 0;
}

static struct sysdev_class i8259_sysdev_class = {
	.name = "i8259",
	.suspend = i8259A_suspend,
	.resume = i8259A_resume,
	.shutdown = i8259A_shutdown,
};

static struct sys_device device_i8259A = {
	.id	= 0,
	.cls	= &i8259_sysdev_class,
};

static int __init i8259A_init_sysfs(void)
{
	int error = sysdev_class_register(&i8259_sysdev_class);
	if (!error)
		error = sysdev_register(&device_i8259A);
	return error;
}

device_initcall(i8259A_init_sysfs);

void init_8259A(int auto_eoi)
{
	unsigned long flags;

	i8259A_auto_eoi = auto_eoi;

	spin_lock_irqsave(&i8259A_lock, flags);

	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */

	/*
	 * outb_p - this has to work on a wide range of PC hardware.
	 */
	outb_p(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
	outb_p(0x20 + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
	outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
	if (auto_eoi)	/* master does Auto EOI */
		outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
	else		/* master expects normal EOI */
		outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);

	outb_p(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
	outb_p(0x20 + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
	outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
	outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
	if (auto_eoi)
		/*
		 * In AEOI mode we just have to mask the interrupt
		 * when acking.
		 */
		i8259A_chip.mask_ack = disable_8259A_irq;
	else
		i8259A_chip.mask_ack = mask_and_ack_8259A;

	udelay(100);		/* wait for 8259A to initialize */

	outb(cached_master_mask, PIC_MASTER_IMR);	/* restore master IRQ mask */
	outb(cached_slave_mask, PIC_SLAVE_IMR);	/* restore slave IRQ mask */

	spin_unlock_irqrestore(&i8259A_lock, flags);
}
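/*
 * The sequence above follows the 8259A datasheet: ICW1 (0x11) selects
 * edge-triggered, cascaded operation with ICW4 present; ICW2 puts the
 * hardware vectors at 0x20-0x2f, safely above the CPU's own exception
 * vectors 0-31; ICW3 describes the cascade wiring on IR2; ICW4 selects
 * 8086 mode, optionally with automatic EOI on the master.
 */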

/*
 * Note that on a 486, we don't want to do a SIGFPE on an irq13
 * as the irq is unreliable, and exception 16 works correctly
 * (i.e. as explained in the Intel literature). On a 386, you
 * can't use exception 16 due to bad IBM design, so we have to
 * rely on the less exact irq13.
 *
 * Careful... Not only is IRQ13 unreliable, but it also
 * leads to races. IBM designers who came up with it should
 * be shot.
 */

static irqreturn_t math_error_irq(int cpl, void *dev_id)
{
	extern void math_error(void __user *);
	outb(0,0xF0);
	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
		return IRQ_NONE;
	math_error((void __user *)get_irq_regs()->ip);
	return IRQ_HANDLED;
}

/*
 * New motherboards sometimes make IRQ 13 a PCI interrupt,
 * so allow interrupt sharing.
 */
static struct irqaction fpu_irq = {
	.handler = math_error_irq,
	.mask = CPU_MASK_NONE,
	.name = "fpu",
};

void __init init_ISA_irqs (void)
{
	int i;

#ifdef CONFIG_X86_LOCAL_APIC
	init_bsp_APIC();
#endif
	init_8259A(0);

	for (i = 0; i < NR_IRQS; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;
		irq_desc[i].depth = 1;

		if (i < 16) {
			/*
			 * 16 old-style INTA-cycle interrupts:
			 */
			set_irq_chip_and_handler_name(i, &i8259A_chip,
						      handle_level_irq, "XT");
		} else {
			/*
			 * 'high' PCI IRQs filled in on demand
			 */
			irq_desc[i].chip = &no_irq_chip;
		}
	}
}
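/*
 * After init_ISA_irqs() the 16 legacy lines are wired to the 8259A
 * chip with the level-triggered flow handler; everything above IRQ 15
 * stays on no_irq_chip until the IO-APIC (or MSI) setup code claims it.
 */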

/* Overridden in paravirt.c */
void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));

void __init native_init_IRQ(void)
{
	int i;

	/* all the set up before the call gates are initialised */
	pre_intr_init_hook();

	/*
	 * Cover the whole vector space, no vector can escape
	 * us. (some of these will be overridden and become
	 * 'special' SMP interrupts)
	 */
	for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
		int vector = FIRST_EXTERNAL_VECTOR + i;
		if (i >= NR_IRQS)
			break;
		/* SYSCALL_VECTOR was reserved in trap_init. */
		if (!test_bit(vector, used_vectors))
			set_intr_gate(vector, interrupt[i]);
	}

	/* setup after call gates are initialised (usually add in
	 * the architecture specific gates)
	 */
	intr_init_hook();

	/*
	 * External FPU? Set up irq13 if so, for
	 * original braindamaged IBM FERR coupling.
	 */
	if (boot_cpu_data.hard_math && !cpu_has_fpu)
		setup_irq(FPU_IRQ, &fpu_irq);

	irq_ctx_init(smp_processor_id());
}
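/*
 * interrupt[] holds the low-level IRQ entry stubs (built in
 * entry_32.S); every external vector from FIRST_EXTERNAL_VECTOR (0x20)
 * upwards gets one, except for vectors such as SYSCALL_VECTOR that
 * trap_init() already marked in used_vectors.
 */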