parisc: implement irq stacks - part 2 (v2)
[deliverable/linux.git] / arch / parisc / kernel / irq.c
/*
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
 * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
 * Copyright (C) 1999-2000 Grant Grundler
 * Copyright (c) 2005 Matthew Wilcox
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/io.h>

#include <asm/smp.h>

#undef PARISC_IRQ_CR16_COUNTS

extern irqreturn_t timer_interrupt(int, void *);
extern irqreturn_t ipi_interrupt(int, void *);

#define EIEM_MASK(irq)	(1UL<<(CPU_IRQ_MAX - irq))

/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/
static volatile unsigned long cpu_eiem = 0;

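/*
 * Worked example (assuming the usual asm/irq.h layout, where TIMER_IRQ
 * == CPU_IRQ_BASE and CPU_IRQ_MAX == CPU_IRQ_BASE + BITS_PER_LONG - 1):
 * EIEM_MASK(TIMER_IRQ) == 1UL << (BITS_PER_LONG - 1), the MSB, which is
 * "bit 0" in the big-endian numbering above; EIEM_MASK(CPU_IRQ_MAX) == 1UL.
 */
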
/*
** local ACK bitmap ... habitually set to 1, but reset to zero
** between ->irq_ack() and ->irq_eoi() of the interrupt to prevent
** re-interruption of a processing interrupt.
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;

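/*
 * Per-source sequence on this CPU (see cpu_ack_irq()/cpu_eoi_irq() below):
 *   1. ->irq_ack clears the source's bit here and writes the narrowed
 *      mask to the EIEM, blocking re-delivery while the handler runs;
 *   2. the action handler runs;
 *   3. ->irq_eoi sets the bit again and rewrites the EIEM.
 */
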
static void cpu_mask_irq(struct irq_data *d)
{
	unsigned long eirr_bit = EIEM_MASK(d->irq);

	cpu_eiem &= ~eirr_bit;
	/* Do nothing on the other CPUs. If they get this interrupt,
	 * the & cpu_eiem in the do_cpu_irq_mask() ensures they won't
	 * handle it, and the set_eiem() at the bottom will ensure it
	 * then gets disabled */
}

static void __cpu_unmask_irq(unsigned int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem |= eirr_bit;

	/* This is just a simple NOP IPI. But what it does is cause
	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
	 * of the interrupt handler */
	smp_send_all_nop();
}

static void cpu_unmask_irq(struct irq_data *d)
{
	__cpu_unmask_irq(d->irq);
}

void cpu_ack_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* Clear in EIEM so we can no longer process */
	per_cpu(local_ack_eiem, cpu) &= ~mask;

	/* disable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));

	/* and now ack it */
	mtctl(mask, 23);
}

void cpu_eoi_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* set it in the eiems---it's no longer in process */
	per_cpu(local_ack_eiem, cpu) |= mask;

	/* enable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}

#ifdef CONFIG_SMP
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
	int cpu_dest;

	/* timer and ipi have to always be received on all CPUs */
	if (irqd_is_per_cpu(d))
		return -EINVAL;

	/* whatever mask they set, we just allow one CPU */
	cpu_dest = first_cpu(*dest);

	return cpu_dest;
}

static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
				bool force)
{
	int cpu_dest;

	cpu_dest = cpu_check_affinity(d, dest);
	if (cpu_dest < 0)
		return -1;

	cpumask_copy(d->affinity, dest);

	return 0;
}
#endif

static struct irq_chip cpu_interrupt_type = {
	.name			= "CPU",
	.irq_mask		= cpu_mask_irq,
	.irq_unmask		= cpu_unmask_irq,
	.irq_ack		= cpu_ack_irq,
	.irq_eoi		= cpu_eoi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= cpu_set_affinity_irq,
#endif
	/* XXX: Needs to be written. We managed without it so far, but
	 * we really ought to write it.
	 */
	.irq_retrigger	= NULL,
};

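/*
 * Note: the CPU interrupts below are wired up with handle_percpu_irq(),
 * which (in this kernel's genirq flow) calls ->irq_ack before and
 * ->irq_eoi after the action handler, so a source stays masked on this
 * CPU for exactly as long as its handler runs.
 */
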
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define irq_stats(x)	(&per_cpu(irq_stat, x))

/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	seq_printf(p, "%*s: ", prec, "STK");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
	seq_puts(p, " Kernel stack usage\n");
# ifdef CONFIG_IRQSTACKS
	seq_printf(p, "%*s: ", prec, "IST");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
	seq_puts(p, " Interrupt stack usage\n");
	seq_printf(p, "%*s: ", prec, "ISC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_counter);
	seq_puts(p, " Interrupt stack usage counter\n");
# endif
#endif
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, " Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_puts(p, " Function call interrupts\n");
#endif
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, " TLB shootdowns\n");
	return 0;
}

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "    ");
		for_each_online_cpu(j)
			seq_printf(p, "       CPU%d", j);

#ifdef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		struct irq_desc *desc = irq_to_desc(i);
		struct irqaction *action;

		raw_spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif

		seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
#ifndef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " %s", action->name);

		while ((action = action->next))
			seq_printf(p, ", %s", action->name);
#else
		for ( ; action; action = action->next) {
			unsigned int k, avg, min, max;

			min = max = action->cr16_hist[0];

			for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
				int hist = action->cr16_hist[k];

				if (hist) {
					avg += hist;
				} else
					break;

				if (hist > max) max = hist;
				if (hist < min) min = hist;
			}

			avg /= k;
			seq_printf(p, " %s[%d/%d/%d]", action->name,
				   min, avg, max);
		}
#endif

		seq_putc(p, '\n');
 skip:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	if (i == NR_IRQS)
		arch_show_interrupts(p, 3);

	return 0;
}


/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/

int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
	if (irq_has_action(irq))
		return -EBUSY;
	if (irq_get_chip(irq) != &cpu_interrupt_type)
		return -EBUSY;

	/* for iosapic interrupts */
	if (type) {
		irq_set_chip_and_handler(irq, type, handle_percpu_irq);
		irq_set_chip_data(irq, data);
		__cpu_unmask_irq(irq);
	}
	return 0;
}

int txn_claim_irq(int irq)
{
	return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}

/*
 * The bits_wide parameter accommodates the limitations of the HW/SW which
 * use these bits:
 * Legacy PA I/O (GSC/NIO):  5 bits (architected EIM register)
 * V-class (EPIC):           6 bits
 * N/L/A-class (iosapic):    8 bits
 * PCI 2.2 MSI:             16 bits
 * Some PCI devices:        32 bits (Symbios SCSI/ATM/HyperFabric)
 *
 * On the service provider side:
 * o PA 1.1 (and PA2.0 narrow mode)  5-bits (width of EIR register)
 * o PA 2.0 wide mode                6-bits (per processor)
 * o IA64                            8-bits (0-256 total)
 *
 * So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
 * by the processor...and the N/L-class I/O subsystem supports more bits than
 * PA2.0 has. The first case is the problem.
 */
int txn_alloc_irq(unsigned int bits_wide)
{
	int irq;

	/* never return irq 0 because that's the interval timer */
	for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
		if (cpu_claim_irq(irq, NULL, NULL) < 0)
			continue;
		if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
			continue;
		return irq;
	}

	/* unlikely, but be prepared */
	return -1;
}

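/*
 * Illustrative example: a legacy GSC device with bits_wide == 5 can only
 * be handed EIRR offsets 1..31, i.e. irqs CPU_IRQ_BASE+1 .. CPU_IRQ_BASE+31;
 * any irq the loop claims beyond that range fails the bits_wide test above
 * and is skipped.
 */
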

unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
	struct irq_data *d = irq_get_irq_data(irq);
	cpumask_copy(d->affinity, cpumask_of(cpu));
#endif

	return per_cpu(cpu_data, cpu).txn_addr;
}


unsigned long txn_alloc_addr(unsigned int virt_irq)
{
	static int next_cpu = -1;

	next_cpu++; /* assign to "next" CPU we want this bugger on */

	/* validate entry */
	while ((next_cpu < nr_cpu_ids) &&
		(!per_cpu(cpu_data, next_cpu).txn_addr ||
		 !cpu_online(next_cpu)))
		next_cpu++;

	if (next_cpu >= nr_cpu_ids)
		next_cpu = 0;	/* nothing else, assign monarch */

	return txn_affinity_addr(virt_irq, next_cpu);
}


unsigned int txn_alloc_data(unsigned int virt_irq)
{
	return virt_irq - CPU_IRQ_BASE;
}

static inline int eirr_to_irq(unsigned long eirr)
{
	int bit = fls_long(eirr);
	return (BITS_PER_LONG - bit) + TIMER_IRQ;
}
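
/*
 * Worked example (64-bit kernel, BITS_PER_LONG == 64): if only EIRR
 * "bit 0" (the MSB, i.e. the interval timer) is pending, eirr is
 * 1UL << 63, fls_long() returns 64, and the result is
 * (64 - 64) + TIMER_IRQ == TIMER_IRQ. The most significant pending bit
 * (lowest big-endian bit number) is always decoded first.
 */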

int sysctl_panic_on_stackoverflow = 1;

static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	#define STACK_MARGIN	(256*6)

	/* Our stack starts directly behind the thread_info struct. */
	unsigned long stack_start = (unsigned long) current_thread_info();
	unsigned long sp = regs->gr[30];
	unsigned long stack_usage;
	unsigned int *last_usage;
	int cpu = smp_processor_id();

	/* if sr7 != 0, we interrupted a userspace process which we do not want
	 * to check for stack overflow. We will only check the kernel stack. */
	if (regs->sr[7])
		return;

	/* calculate kernel stack usage */
	stack_usage = sp - stack_start;
#ifdef CONFIG_IRQSTACKS
	if (likely(stack_usage <= THREAD_SIZE))
		goto check_kernel_stack; /* found kernel stack */

	/* check irq stack usage */
	stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
	stack_usage = sp - stack_start;

	last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
	if (unlikely(stack_usage > *last_usage))
		*last_usage = stack_usage;

	if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
		return;

	pr_emerg("stackcheck: %s will most likely overflow irq stack "
		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
		current->comm, sp, stack_start,
		stack_start + IRQ_STACK_SIZE);
	goto panic_check;

check_kernel_stack:
#endif

	/* check kernel stack usage */
	last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);

	if (unlikely(stack_usage > *last_usage))
		*last_usage = stack_usage;

	if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
		return;

	pr_emerg("stackcheck: %s will most likely overflow kernel stack "
		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
		current->comm, sp, stack_start, stack_start + THREAD_SIZE);

#ifdef CONFIG_IRQSTACKS
panic_check:
#endif
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
#endif
}
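
/*
 * Note: STACK_MARGIN above works out to 256*6 == 1536 bytes of headroom;
 * the check fires while that much space is still free, so the warning
 * (and the optional panic, see sysctl_panic_on_stackoverflow) can itself
 * run before the stack actually overflows.
 */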

#ifdef CONFIG_IRQSTACKS
DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED((irq_stack_union).lock)
	};

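/*
 * Layout note (a sketch of the intent, given the union above): the lock
 * shares storage with the first words of the per-CPU stack. Since the
 * parisc stack grows upward, execute_on_irq_stack() below starts the
 * usable stack at ALIGN(stack + sizeof(lock), 64) so that stack frames
 * never overwrite the in-use lock.
 */
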
static void execute_on_irq_stack(void *func, unsigned long param1)
{
	union irq_stack_union *union_ptr;
	unsigned long irq_stack;
	raw_spinlock_t *irq_stack_in_use;

	union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
	irq_stack = (unsigned long) &union_ptr->stack;
	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.lock),
			 64); /* align for stack frame usage */

	/* We may be called recursively. If we are already using the irq
	 * stack, just continue to use it. Use spinlocks to serialize
	 * the irq stack usage.
	 */
	irq_stack_in_use = &union_ptr->lock;
	if (!raw_spin_trylock(irq_stack_in_use)) {
		void (*direct_call)(unsigned long p1) = func;

		/* We are using the IRQ stack already.
		 * Do direct call on current stack. */
		direct_call(param1);
		return;
	}

	/* This is where we switch to the IRQ stack. */
	call_on_stack(param1, func, irq_stack);

	__inc_irq_stat(irq_stack_counter);

	/* free up irq stack usage. */
	do_raw_spin_unlock(irq_stack_in_use);
}
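
/*
 * Design note: the raw spinlock doubles as an "irq stack in use" flag
 * for this CPU. raw_spin_trylock() never blocks here: if the stack is
 * already occupied (e.g. an external interrupt taken while softirqs run
 * on the irq stack), the function simply executes on the current stack
 * instead of switching.
 */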

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		execute_on_irq_stack(__do_softirq, 0);

	local_irq_restore(flags);
}
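
/*
 * On the in_interrupt() bail-out above: if we are already in interrupt
 * context, any pending softirqs will be run on irq_exit() anyway, so
 * there is no need to run them synchronously here.
 */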
#endif /* CONFIG_IRQSTACKS */

/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned long eirr_val;
	int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	cpumask_t dest;
#endif

	old_regs = set_irq_regs(regs);
	local_irq_disable();
	irq_enter();

	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)
		goto set_out;
	irq = eirr_to_irq(eirr_val);

#ifdef CONFIG_SMP
	desc = irq_to_desc(irq);
	cpumask_copy(&dest, desc->irq_data.affinity);
	if (irqd_is_per_cpu(&desc->irq_data) &&
	    !cpu_isset(smp_processor_id(), dest)) {
		int cpu = first_cpu(dest);

		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
			irq, smp_processor_id(), cpu);
		gsc_writel(irq + CPU_IRQ_BASE,
			per_cpu(cpu_data, cpu).hpa);
		goto set_out;
	}
#endif
	stack_overflow_check(regs);

#ifdef CONFIG_IRQSTACKS
	execute_on_irq_stack(&generic_handle_irq, irq);
#else
	generic_handle_irq(irq);
#endif /* CONFIG_IRQSTACKS */

 out:
	irq_exit();
	set_irq_regs(old_regs);
	return;

 set_out:
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
	goto out;
}
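
/*
 * Flow summary: read the pending bits from the EIRR (cr23), mask them
 * with both the global cpu_eiem and this CPU's local_ack_eiem, and decode
 * the most significant survivor (see eirr_to_irq() above). If the irq's
 * affinity points at another CPU, re-trigger it there by poking that
 * CPU's EIR via gsc_writel(); otherwise handle it here, on the dedicated
 * irq stack when CONFIG_IRQSTACKS is enabled.
 */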

static struct irqaction timer_action = {
	.handler = timer_interrupt,
	.name = "timer",
	.flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
};

#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
	.handler = ipi_interrupt,
	.name = "IPI",
	.flags = IRQF_PERCPU,
};
#endif

static void claim_cpu_irqs(void)
{
	int i;
	for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
		irq_set_chip_and_handler(i, &cpu_interrupt_type,
					 handle_percpu_irq);
	}

	irq_set_handler(TIMER_IRQ, handle_percpu_irq);
	setup_irq(TIMER_IRQ, &timer_action);
#ifdef CONFIG_SMP
	irq_set_handler(IPI_IRQ, handle_percpu_irq);
	setup_irq(IPI_IRQ, &ipi_action);
#endif
}

void __init init_IRQ(void)
{
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
#ifdef CONFIG_SMP
	if (!cpu_eiem) {
		claim_cpu_irqs();
		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
	}
#else
	claim_cpu_irqs();
	cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
}