/*
 * arch/powerpc/kernel/mpic.c
 *
 * Driver for interrupt controllers following the OpenPIC standard, the
 * common implementation being IBM's MPIC. This driver also can deal
 * with various broken implementations of this HW.
 *
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#undef DEBUG
#undef DEBUG_IPI
#undef DEBUG_IRQ
#undef DEBUG_LOW

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/pci.h>

#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
#include <asm/smp.h>

#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

static struct mpic *mpics;
static struct mpic *mpic_primary;
static DEFINE_SPINLOCK(mpic_lock);

#ifdef CONFIG_PPC32	/* XXX for now */
#ifdef CONFIG_IRQ_ALL_CPUS
#define distribute_irqs	(1)
#else
#define distribute_irqs	(0)
#endif
#endif

/*
 * Register accessor functions
 */


static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base,
			     unsigned int reg)
{
	if (be)
		return in_be32(base + (reg >> 2));
	else
		return in_le32(base + (reg >> 2));
}

static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base,
			       unsigned int reg, u32 value)
{
	if (be)
		out_be32(base + (reg >> 2), value);
	else
		out_le32(base + (reg >> 2), value);
}

static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
{
	unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0;
	unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);

	if (mpic->flags & MPIC_BROKEN_IPI)
		be = !be;
	return _mpic_read(be, mpic->gregs, offset);
}

static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
{
	unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);

	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value);
}

static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
{
	unsigned int cpu = 0;

	if (mpic->flags & MPIC_PRIMARY)
		cpu = hard_smp_processor_id();
	return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,
			  mpic->cpuregs[cpu], reg);
}

static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
{
	unsigned int cpu = 0;

	if (mpic->flags & MPIC_PRIMARY)
		cpu = hard_smp_processor_id();

	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value);
}

static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
{
	unsigned int isu = src_no >> mpic->isu_shift;
	unsigned int idx = src_no & mpic->isu_mask;

	return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
			  reg + (idx * MPIC_IRQ_STRIDE));
}

static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
				   unsigned int reg, u32 value)
{
	unsigned int isu = src_no >> mpic->isu_shift;
	unsigned int idx = src_no & mpic->isu_mask;

	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
		    reg + (idx * MPIC_IRQ_STRIDE), value);
}

#define mpic_read(b,r)		_mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
#define mpic_write(b,r,v)	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v))
#define mpic_ipi_read(i)	_mpic_ipi_read(mpic,(i))
#define mpic_ipi_write(i,v)	_mpic_ipi_write(mpic,(i),(v))
#define mpic_cpu_read(i)	_mpic_cpu_read(mpic,(i))
#define mpic_cpu_write(i,v)	_mpic_cpu_write(mpic,(i),(v))
#define mpic_irq_read(s,r)	_mpic_irq_read(mpic,(s),(r))
#define mpic_irq_write(s,r,v)	_mpic_irq_write(mpic,(s),(r),(v))


/*
 * Low level utility functions
 */


/* Check if we have one of those nice broken MPICs with a flipped endian on
 * reads from IPI registers
 */
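/*
 * The probe writes the MPIC_VECPRI_MASK pattern to IPI 0's vector/priority
 * register and reads it back: if the value comes back byte-swapped, reads
 * from the IPI registers use the opposite endianness and MPIC_BROKEN_IPI
 * is set so that _mpic_ipi_read() can flip the endianness back.
 */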
static void __init mpic_test_broken_ipi(struct mpic *mpic)
{
	u32 r;

	mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK);
	r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0);

	if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
		printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
		mpic->flags |= MPIC_BROKEN_IPI;
	}
}

#ifdef CONFIG_MPIC_BROKEN_U3

/* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
 * to force the edge setting on the MPIC and do the ack workaround.
 */
static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
{
	if (source >= 128 || !mpic->fixups)
		return 0;
	return mpic->fixups[source].base != NULL;
}

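/*
 * The HT interrupt-discovery capability is driven as an index/data pair:
 * a byte written at base + 2 selects a register and base + 4 is the 32-bit
 * data window. Index 0x10 + 2 * i is the low definition word of interrupt
 * i (mask bit 0x1, level/trigger bits 0x22) and 0x11 + 2 * i is the high
 * word, which is written back with bit 31 set to EOI that interrupt.
 */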
static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];

	if (fixup->applebase) {
		unsigned int soff = (fixup->index >> 3) & ~3;
		unsigned int mask = 1U << (fixup->index & 0x1f);
		writel(mask, fixup->applebase + soff);
	} else {
		spin_lock(&mpic->fixup_lock);
		writeb(0x11 + 2 * fixup->index, fixup->base + 2);
		writel(fixup->data, fixup->base + 4);
		spin_unlock(&mpic->fixup_lock);
	}
}

static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
				      unsigned int irqflags)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];
	unsigned long flags;
	u32 tmp;

	if (fixup->base == NULL)
		return;

	DBG("startup_ht_interrupt(%u, %u) index: %d\n",
	    source, irqflags, fixup->index);
	spin_lock_irqsave(&mpic->fixup_lock, flags);
	/* Enable and configure */
	writeb(0x10 + 2 * fixup->index, fixup->base + 2);
	tmp = readl(fixup->base + 4);
	tmp &= ~(0x23U);
	if (irqflags & IRQ_LEVEL)
		tmp |= 0x22;
	writel(tmp, fixup->base + 4);
	spin_unlock_irqrestore(&mpic->fixup_lock, flags);
}

static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source,
				       unsigned int irqflags)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];
	unsigned long flags;
	u32 tmp;

	if (fixup->base == NULL)
		return;

	DBG("shutdown_ht_interrupt(%u, %u)\n", source, irqflags);

	/* Disable */
	spin_lock_irqsave(&mpic->fixup_lock, flags);
	writeb(0x10 + 2 * fixup->index, fixup->base + 2);
	tmp = readl(fixup->base + 4);
	tmp |= 1;
	writel(tmp, fixup->base + 4);
	spin_unlock_irqrestore(&mpic->fixup_lock, flags);
}

static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
				    unsigned int devfn, u32 vdid)
{
	int i, irq, n;
	u8 __iomem *base;
	u32 tmp;
	u8 pos;

	for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
	     pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
		u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
		if (id == PCI_CAP_ID_HT_IRQCONF) {
			id = readb(devbase + pos + 3);
			if (id == 0x80)
				break;
		}
	}
	if (pos == 0)
		return;

	base = devbase + pos;
	writeb(0x01, base + 2);
	n = (readl(base + 4) >> 16) & 0xff;

	printk(KERN_INFO "mpic: - HT:%02x.%x [0x%02x] vendor %04x device %04x"
	       " has %d irqs\n",
	       devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1);

	for (i = 0; i <= n; i++) {
		writeb(0x10 + 2 * i, base + 2);
		tmp = readl(base + 4);
		irq = (tmp >> 16) & 0xff;
		DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp);
		/* mask it, will be unmasked later */
		tmp |= 0x1;
		writel(tmp, base + 4);
		mpic->fixups[irq].index = i;
		mpic->fixups[irq].base = base;
		/* Apple HT PIC has a non-standard way of doing EOIs */
		if ((vdid & 0xffff) == 0x106b)
			mpic->fixups[irq].applebase = devbase + 0x60;
		else
			mpic->fixups[irq].applebase = NULL;
		writeb(0x11 + 2 * i, base + 2);
		mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
	}
}


static void __init mpic_scan_ht_pics(struct mpic *mpic)
{
	unsigned int devfn;
	u8 __iomem *cfgspace;

	printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");

	/* Allocate fixups array */
	mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup));
	BUG_ON(mpic->fixups == NULL);
	memset(mpic->fixups, 0, 128 * sizeof(struct mpic_irq_fixup));

	/* Init spinlock */
	spin_lock_init(&mpic->fixup_lock);

	/* Map U3 config space. We assume all IO-APICs are on the primary bus
	 * so we only need to map 64kB.
	 */
	cfgspace = ioremap(0xf2000000, 0x10000);
	BUG_ON(cfgspace == NULL);

	/* Now we scan all slots. We do a very quick scan, we read the header
	 * type, vendor ID and device ID only, that's plenty enough
	 */
	for (devfn = 0; devfn < 0x100; devfn++) {
		u8 __iomem *devbase = cfgspace + (devfn << 8);
		u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
		u32 l = readl(devbase + PCI_VENDOR_ID);
		u16 s;

		DBG("devfn %x, l: %x\n", devfn, l);

		/* If no device, skip */
		if (l == 0xffffffff || l == 0x00000000 ||
		    l == 0x0000ffff || l == 0xffff0000)
			goto next;
		/* Check if it supports capability lists */
		s = readw(devbase + PCI_STATUS);
		if (!(s & PCI_STATUS_CAP_LIST))
			goto next;

		mpic_scan_ht_pic(mpic, devbase, devfn, l);

	next:
		/* next device, if function 0 */
		if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0)
			devfn += 7;
	}
}

#endif /* CONFIG_MPIC_BROKEN_U3 */


#define mpic_irq_to_hw(virq)	((unsigned int)irq_map[virq].hwirq)

/* Find an mpic associated with a given linux interrupt */
static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi)
{
	unsigned int src = mpic_irq_to_hw(irq);

	if (irq < NUM_ISA_INTERRUPTS)
		return NULL;
	if (is_ipi)
		*is_ipi = (src >= MPIC_VEC_IPI_0 && src <= MPIC_VEC_IPI_3);

	return irq_desc[irq].chip_data;
}

/* Convert a cpu mask from logical to physical cpu numbers. */
static inline u32 mpic_physmask(u32 cpumask)
{
	int i;
	u32 mask = 0;

	for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1)
		mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
	return mask;
}

#ifdef CONFIG_SMP
/* Get the mpic structure from the IPI number */
static inline struct mpic * mpic_from_ipi(unsigned int ipi)
{
	return irq_desc[ipi].chip_data;
}
#endif

/* Get the mpic structure from the irq number */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
	return irq_desc[irq].chip_data;
}

/* Send an EOI */
static inline void mpic_eoi(struct mpic *mpic)
{
	mpic_cpu_write(MPIC_CPU_EOI, 0);
	(void)mpic_cpu_read(MPIC_CPU_WHOAMI);
}

#ifdef CONFIG_SMP
static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
	smp_message_recv(mpic_irq_to_hw(irq) - MPIC_VEC_IPI_0, regs);
	return IRQ_HANDLED;
}
#endif /* CONFIG_SMP */

/*
 * Linux descriptor level callbacks
 */


static void mpic_unmask_irq(unsigned int irq)
{
	unsigned int loops = 100000;
	struct mpic *mpic = mpic_from_irq(irq);
	unsigned int src = mpic_irq_to_hw(irq);

	DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);

	mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
		       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) &
		       ~MPIC_VECPRI_MASK);

	/* make sure mask gets to controller before we return to user */
	do {
		if (!loops--) {
			printk(KERN_ERR "mpic_enable_irq timeout\n");
			break;
		}
	} while (mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
}

static void mpic_mask_irq(unsigned int irq)
{
	unsigned int loops = 100000;
	struct mpic *mpic = mpic_from_irq(irq);
	unsigned int src = mpic_irq_to_hw(irq);

	DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);

	mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
		       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) |
		       MPIC_VECPRI_MASK);

	/* make sure mask gets to controller before we return to user */
	do {
		if (!loops--) {
			printk(KERN_ERR "mpic_mask_irq timeout\n");
			break;
		}
	} while (!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
}

static void mpic_end_irq(unsigned int irq)
{
	struct mpic *mpic = mpic_from_irq(irq);

#ifdef DEBUG_IRQ
	DBG("%s: end_irq: %d\n", mpic->name, irq);
#endif
	/* We always EOI on end_irq() even for edge interrupts since that
	 * should only lower the priority, the MPIC should have properly
	 * latched another edge interrupt coming in anyway
	 */

	mpic_eoi(mpic);
}

#ifdef CONFIG_MPIC_BROKEN_U3

static void mpic_unmask_ht_irq(unsigned int irq)
{
	struct mpic *mpic = mpic_from_irq(irq);
	unsigned int src = mpic_irq_to_hw(irq);

	mpic_unmask_irq(irq);

	if (irq_desc[irq].status & IRQ_LEVEL)
		mpic_ht_end_irq(mpic, src);
}

static unsigned int mpic_startup_ht_irq(unsigned int irq)
{
	struct mpic *mpic = mpic_from_irq(irq);
	unsigned int src = mpic_irq_to_hw(irq);

	mpic_unmask_irq(irq);
	mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status);

	return 0;
}

static void mpic_shutdown_ht_irq(unsigned int irq)
{
	struct mpic *mpic = mpic_from_irq(irq);
	unsigned int src = mpic_irq_to_hw(irq);

	mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status);
	mpic_mask_irq(irq);
}

static void mpic_end_ht_irq(unsigned int irq)
{
	struct mpic *mpic = mpic_from_irq(irq);
	unsigned int src = mpic_irq_to_hw(irq);

#ifdef DEBUG_IRQ
	DBG("%s: end_irq: %d\n", mpic->name, irq);
#endif
	/* We always EOI on end_irq() even for edge interrupts since that
	 * should only lower the priority, the MPIC should have properly
	 * latched another edge interrupt coming in anyway
	 */

	if (irq_desc[irq].status & IRQ_LEVEL)
		mpic_ht_end_irq(mpic, src);
	mpic_eoi(mpic);
}

#endif /* CONFIG_MPIC_BROKEN_U3 */

#ifdef CONFIG_SMP

static void mpic_unmask_ipi(unsigned int irq)
{
	struct mpic *mpic = mpic_from_ipi(irq);
	unsigned int src = mpic_irq_to_hw(irq) - MPIC_VEC_IPI_0;

	DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src);
	mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
}

static void mpic_mask_ipi(unsigned int irq)
{
	/* NEVER disable an IPI... that's just plain wrong! */
}

static void mpic_end_ipi(unsigned int irq)
{
	struct mpic *mpic = mpic_from_ipi(irq);

	/*
	 * IPIs are marked IRQ_PER_CPU. This has the side effect of
	 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
	 * applying to them. We EOI them late to avoid re-entering.
	 * We mark IPI's with IRQF_DISABLED as they must run with
	 * irqs disabled.
	 */
	mpic_eoi(mpic);
}

#endif /* CONFIG_SMP */

static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
{
	struct mpic *mpic = mpic_from_irq(irq);
	unsigned int src = mpic_irq_to_hw(irq);

	cpumask_t tmp;

	cpus_and(tmp, cpumask, cpu_online_map);

	mpic_irq_write(src, MPIC_IRQ_DESTINATION,
		       mpic_physmask(cpus_addr(tmp)[0]));
}

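/*
 * Map a linux IRQ_TYPE_* value to the MPIC vector/priority encoding:
 * edge vs. level goes into the SENSE bit, rising/high vs. falling/low into
 * the POLARITY bit, and *level is set so the caller can mark the
 * descriptor IRQ_LEVEL for level-triggered sources.
 */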
static unsigned int mpic_flags_to_vecpri(unsigned int flags, int *level)
{
	unsigned int vecpri;

	/* Now convert sense value */
	switch (flags & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_RISING:
		vecpri = MPIC_VECPRI_SENSE_EDGE |
			MPIC_VECPRI_POLARITY_POSITIVE;
		*level = 0;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		vecpri = MPIC_VECPRI_SENSE_EDGE |
			MPIC_VECPRI_POLARITY_NEGATIVE;
		*level = 0;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		vecpri = MPIC_VECPRI_SENSE_LEVEL |
			MPIC_VECPRI_POLARITY_POSITIVE;
		*level = 1;
		break;
	case IRQ_TYPE_LEVEL_LOW:
	default:
		vecpri = MPIC_VECPRI_SENSE_LEVEL |
			MPIC_VECPRI_POLARITY_NEGATIVE;
		*level = 1;
	}
	return vecpri;
}

static struct irq_chip mpic_irq_chip = {
	.mask		= mpic_mask_irq,
	.unmask		= mpic_unmask_irq,
	.eoi		= mpic_end_irq,
};

#ifdef CONFIG_SMP
static struct irq_chip mpic_ipi_chip = {
	.mask		= mpic_mask_ipi,
	.unmask		= mpic_unmask_ipi,
	.eoi		= mpic_end_ipi,
};
#endif /* CONFIG_SMP */

#ifdef CONFIG_MPIC_BROKEN_U3
static struct irq_chip mpic_irq_ht_chip = {
	.startup	= mpic_startup_ht_irq,
	.shutdown	= mpic_shutdown_ht_irq,
	.mask		= mpic_mask_irq,
	.unmask		= mpic_unmask_ht_irq,
	.eoi		= mpic_end_ht_irq,
};
#endif /* CONFIG_MPIC_BROKEN_U3 */


static int mpic_host_match(struct irq_host *h, struct device_node *node)
{
	struct mpic *mpic = h->host_data;

	/* Exact match, unless mpic node is NULL */
	return mpic->of_node == NULL || mpic->of_node == node;
}

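/*
 * irq_host map callback, called when a hardware source is first mapped to
 * a linux virq: it picks the right irq_chip (IPI, HT-fixup or plain MPIC),
 * derives the sense from the supplied flags or the default senses array,
 * and programs the source's vector/priority register (masked, priority 8,
 * vector = hardware number).
 */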
static int mpic_host_map(struct irq_host *h, unsigned int virq,
			 irq_hw_number_t hw, unsigned int flags)
{
	struct irq_desc *desc = get_irq_desc(virq);
	struct irq_chip *chip;
	struct mpic *mpic = h->host_data;
	unsigned int vecpri = MPIC_VECPRI_SENSE_LEVEL |
		MPIC_VECPRI_POLARITY_NEGATIVE;
	int level;

	pr_debug("mpic: map virq %d, hwirq 0x%lx, flags: 0x%x\n",
		 virq, hw, flags);

	if (hw == MPIC_VEC_SPURRIOUS)
		return -EINVAL;
#ifdef CONFIG_SMP
	else if (hw >= MPIC_VEC_IPI_0) {
		WARN_ON(!(mpic->flags & MPIC_PRIMARY));

		pr_debug("mpic: mapping as IPI\n");
		set_irq_chip_data(virq, mpic);
		set_irq_chip_and_handler(virq, &mpic->hc_ipi,
					 handle_percpu_irq);
		return 0;
	}
#endif /* CONFIG_SMP */

	if (hw >= mpic->irq_count)
		return -EINVAL;

	/* If no sense provided, check default sense array */
	if (((flags & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_NONE) &&
	    mpic->senses && hw < mpic->senses_count)
		flags |= mpic->senses[hw];

	vecpri = mpic_flags_to_vecpri(flags, &level);
	if (level)
		desc->status |= IRQ_LEVEL;
	chip = &mpic->hc_irq;

#ifdef CONFIG_MPIC_BROKEN_U3
	/* Check for HT interrupts, override vecpri */
	if (mpic_is_ht_interrupt(mpic, hw)) {
		vecpri &= ~(MPIC_VECPRI_SENSE_MASK |
			    MPIC_VECPRI_POLARITY_MASK);
		vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
		chip = &mpic->hc_ht_irq;
	}
#endif

	/* Reconfigure irq */
	vecpri |= MPIC_VECPRI_MASK | hw | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
	mpic_irq_write(hw, MPIC_IRQ_VECTOR_PRI, vecpri);

	pr_debug("mpic: mapping as IRQ\n");

	set_irq_chip_data(virq, mpic);
	set_irq_chip_and_handler(virq, chip, handle_fasteoi_irq);
	return 0;
}

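/*
 * irq_host xlate callback: cell 0 of the device-tree interrupt specifier
 * is the MPIC source number and cell 1, when present and valid, selects
 * the sense/polarity from the 4-entry table below.
 */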
static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
			   u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	static unsigned char map_mpic_senses[4] = {
		IRQ_TYPE_EDGE_RISING,
		IRQ_TYPE_LEVEL_LOW,
		IRQ_TYPE_LEVEL_HIGH,
		IRQ_TYPE_EDGE_FALLING,
	};

	*out_hwirq = intspec[0];
	if (intsize > 1 && intspec[1] < 4)
		*out_flags = map_mpic_senses[intspec[1]];
	else
		*out_flags = IRQ_TYPE_NONE;

	return 0;
}

static struct irq_host_ops mpic_host_ops = {
	.match = mpic_host_match,
	.map = mpic_host_map,
	.xlate = mpic_host_xlate,
};

/*
 * Exported functions
 */

struct mpic * __init mpic_alloc(struct device_node *node,
				unsigned long phys_addr,
				unsigned int flags,
				unsigned int isu_size,
				unsigned int irq_count,
				const char *name)
{
	struct mpic *mpic;
	u32 reg;
	const char *vers;
	int i;

	mpic = alloc_bootmem(sizeof(struct mpic));
	if (mpic == NULL)
		return NULL;

	memset(mpic, 0, sizeof(struct mpic));
	mpic->name = name;
	mpic->of_node = node ? of_node_get(node) : NULL;

	mpic->irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 256,
				       &mpic_host_ops,
				       MPIC_VEC_SPURRIOUS);
	if (mpic->irqhost == NULL) {
		of_node_put(node);
		return NULL;
	}

	mpic->irqhost->host_data = mpic;
	mpic->hc_irq = mpic_irq_chip;
	mpic->hc_irq.typename = name;
	if (flags & MPIC_PRIMARY)
		mpic->hc_irq.set_affinity = mpic_set_affinity;
#ifdef CONFIG_MPIC_BROKEN_U3
	mpic->hc_ht_irq = mpic_irq_ht_chip;
	mpic->hc_ht_irq.typename = name;
	if (flags & MPIC_PRIMARY)
		mpic->hc_ht_irq.set_affinity = mpic_set_affinity;
#endif /* CONFIG_MPIC_BROKEN_U3 */
#ifdef CONFIG_SMP
	mpic->hc_ipi = mpic_ipi_chip;
	mpic->hc_ipi.typename = name;
#endif /* CONFIG_SMP */

	mpic->flags = flags;
	mpic->isu_size = isu_size;
	mpic->irq_count = irq_count;
	mpic->num_sources = 0; /* so far */

	/* Map the global registers */
	mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
	mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2);
	BUG_ON(mpic->gregs == NULL);

	/* Reset */
	if (flags & MPIC_WANTS_RESET) {
		mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
			   mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
			   | MPIC_GREG_GCONF_RESET);
		while (mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
		       & MPIC_GREG_GCONF_RESET)
			mb();
	}

	/* Read feature register, calculate num CPUs and, for non-ISU
	 * MPICs, num sources as well. On ISU MPICs, sources are counted
	 * as ISUs are added
	 */
	reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0);
	mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK)
			  >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
	if (isu_size == 0)
		mpic->num_sources = ((reg & MPIC_GREG_FEATURE_LAST_SRC_MASK)
				     >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;

	/* Map the per-CPU registers */
	for (i = 0; i < mpic->num_cpus; i++) {
		mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE +
					   i * MPIC_CPU_STRIDE, 0x1000);
		BUG_ON(mpic->cpuregs[i] == NULL);
	}

	/* Initialize main ISU if none provided */
	if (mpic->isu_size == 0) {
		mpic->isu_size = mpic->num_sources;
		mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE,
					MPIC_IRQ_STRIDE * mpic->isu_size);
		BUG_ON(mpic->isus[0] == NULL);
	}
	mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
	mpic->isu_mask = (1 << mpic->isu_shift) - 1;

	/* Display version */
	switch (reg & MPIC_GREG_FEATURE_VERSION_MASK) {
	case 1:
		vers = "1.0";
		break;
	case 2:
		vers = "1.2";
		break;
	case 3:
		vers = "1.3";
		break;
	default:
		vers = "<unknown>";
		break;
	}
	printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n",
	       name, vers, phys_addr, mpic->num_cpus);
	printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size,
	       mpic->isu_shift, mpic->isu_mask);

	mpic->next = mpics;
	mpics = mpic;

	if (flags & MPIC_PRIMARY) {
		mpic_primary = mpic;
		irq_set_default_host(mpic->irqhost);
	}

	return mpic;
}

void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
			    unsigned long phys_addr)
{
	unsigned int isu_first = isu_num * mpic->isu_size;

	BUG_ON(isu_num >= MPIC_MAX_ISU);

	mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size);
	if ((isu_first + mpic->isu_size) > mpic->num_sources)
		mpic->num_sources = isu_first + mpic->isu_size;
}

void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count)
{
	mpic->senses = senses;
	mpic->senses_count = count;
}

void __init mpic_init(struct mpic *mpic)
{
	int i;

	BUG_ON(mpic->num_sources == 0);
	WARN_ON(mpic->num_sources > MPIC_VEC_IPI_0);

	/* Sanitize source count */
	if (mpic->num_sources > MPIC_VEC_IPI_0)
		mpic->num_sources = MPIC_VEC_IPI_0;

	printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);

	/* Initialize timers: just disable them all */
	for (i = 0; i < 4; i++) {
		mpic_write(mpic->tmregs,
			   i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0);
		mpic_write(mpic->tmregs,
			   i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI,
			   MPIC_VECPRI_MASK |
			   (MPIC_VEC_TIMER_0 + i));
	}

	/* Initialize IPIs to our reserved vectors and mark them disabled for now */
	mpic_test_broken_ipi(mpic);
	for (i = 0; i < 4; i++) {
		mpic_ipi_write(i,
			       MPIC_VECPRI_MASK |
			       (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
			       (MPIC_VEC_IPI_0 + i));
	}

	/* Initialize interrupt sources */
	if (mpic->irq_count == 0)
		mpic->irq_count = mpic->num_sources;

#ifdef CONFIG_MPIC_BROKEN_U3
	/* Do the HT PIC fixups on U3 broken mpic */
	DBG("MPIC flags: %x\n", mpic->flags);
	if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY))
		mpic_scan_ht_pics(mpic);
#endif /* CONFIG_MPIC_BROKEN_U3 */

	for (i = 0; i < mpic->num_sources; i++) {
		/* start with vector = source number, and masked */
		u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
		int level = 1;

		/* do senses munging */
		if (mpic->senses && i < mpic->senses_count)
			vecpri = mpic_flags_to_vecpri(mpic->senses[i],
						      &level);
		else
			vecpri |= MPIC_VECPRI_SENSE_LEVEL;

		/* deal with broken U3 */
		if (mpic->flags & MPIC_BROKEN_U3) {
#ifdef CONFIG_MPIC_BROKEN_U3
			if (mpic_is_ht_interrupt(mpic, i)) {
				vecpri &= ~(MPIC_VECPRI_SENSE_MASK |
					    MPIC_VECPRI_POLARITY_MASK);
				vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
			}
#else
			printk(KERN_ERR "mpic: BROKEN_U3 set, but CONFIG doesn't match\n");
#endif
		}

		DBG("setup source %d, vecpri: %08x, level: %d\n", i, vecpri,
		    (level != 0));

		/* init hw */
		mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
		mpic_irq_write(i, MPIC_IRQ_DESTINATION,
			       1 << hard_smp_processor_id());
	}

	/* Init spurious vector */
	mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS);

	/* Disable 8259 passthrough */
	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
		   mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
		   | MPIC_GREG_GCONF_8259_PTHROU_DIS);

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
}

void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
{
	u32 v;

	v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
	v &= ~MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK;
	v |= MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(clock_ratio);
	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
}

void __init mpic_set_serial_int(struct mpic *mpic, int enable)
{
	u32 v;

	v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
	if (enable)
		v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
	else
		v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
}

void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
{
	int is_ipi;
	struct mpic *mpic = mpic_find(irq, &is_ipi);
	unsigned int src = mpic_irq_to_hw(irq);
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&mpic_lock, flags);
	if (is_ipi) {
		reg = mpic_ipi_read(src - MPIC_VEC_IPI_0) &
			~MPIC_VECPRI_PRIORITY_MASK;
		mpic_ipi_write(src - MPIC_VEC_IPI_0,
			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	} else {
		reg = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI)
			& ~MPIC_VECPRI_PRIORITY_MASK;
		mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	}
	spin_unlock_irqrestore(&mpic_lock, flags);
}

unsigned int mpic_irq_get_priority(unsigned int irq)
{
	int is_ipi;
	struct mpic *mpic = mpic_find(irq, &is_ipi);
	unsigned int src = mpic_irq_to_hw(irq);
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&mpic_lock, flags);
	if (is_ipi)
		reg = mpic_ipi_read(src - MPIC_VEC_IPI_0);
	else
		reg = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI);
	spin_unlock_irqrestore(&mpic_lock, flags);
	return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT;
}

void mpic_setup_this_cpu(void)
{
#ifdef CONFIG_SMP
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());

	spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we want intrs. default affinity is 0xffffffff
	 * until changed via /proc. That's how it's done on x86. If we want
	 * it differently, then we should make sure we also change the default
	 * values of irq_desc[].affinity in irq.c.
	 */
	if (distribute_irqs) {
		for (i = 0; i < mpic->num_sources; i++)
			mpic_irq_write(i, MPIC_IRQ_DESTINATION,
				       mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk);
	}

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);

	spin_unlock_irqrestore(&mpic_lock, flags);
#endif /* CONFIG_SMP */
}

int mpic_cpu_get_priority(void)
{
	struct mpic *mpic = mpic_primary;

	return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
}

void mpic_cpu_set_priority(int prio)
{
	struct mpic *mpic = mpic_primary;

	prio &= MPIC_CPU_TASKPRI_MASK;
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio);
}

/*
 * XXX: someone who knows mpic should check this.
 * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
 * or can we reset the mpic in the new kernel?
 */
void mpic_teardown_this_cpu(int secondary)
{
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
	spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we don't want intrs. */
	for (i = 0; i < mpic->num_sources; i++)
		mpic_irq_write(i, MPIC_IRQ_DESTINATION,
			       mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);

	spin_unlock_irqrestore(&mpic_lock, flags);
}


void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

#ifdef DEBUG_IPI
	DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
#endif

	mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10,
		       mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
}

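/*
 * Reading MPIC_CPU_INTACK acknowledges the highest-priority pending
 * interrupt for this CPU and returns its vector: the spurious vector means
 * nothing was pending, any other vector is mapped back to a linux virq
 * through the irq host's linear revmap.
 */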
unsigned int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
{
	u32 src;

	src = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
#ifdef DEBUG_LOW
	DBG("%s: get_one_irq(): %d\n", mpic->name, src);
#endif
	if (unlikely(src == MPIC_VEC_SPURRIOUS))
		return NO_IRQ;
	return irq_linear_revmap(mpic->irqhost, src);
}

unsigned int mpic_get_irq(struct pt_regs *regs)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

	return mpic_get_one_irq(mpic, regs);
}


#ifdef CONFIG_SMP
void mpic_request_ipis(void)
{
	struct mpic *mpic = mpic_primary;
	int i;
	static char *ipi_names[] = {
		"IPI0 (call function)",
		"IPI1 (reschedule)",
		"IPI2 (unused)",
		"IPI3 (debugger break)",
	};
	BUG_ON(mpic == NULL);

	printk(KERN_INFO "mpic: requesting IPIs ... \n");

	for (i = 0; i < 4; i++) {
		unsigned int vipi = irq_create_mapping(mpic->irqhost,
						       MPIC_VEC_IPI_0 + i, 0);
		if (vipi == NO_IRQ) {
			printk(KERN_ERR "Failed to map IPI %d\n", i);
			break;
		}
		request_irq(vipi, mpic_ipi_action, IRQF_DISABLED,
			    ipi_names[i], mpic);
	}
}

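/*
 * Translate the generic SMP message number (0-3) into an IPI dispatch:
 * MSG_ALL targets every online CPU, MSG_ALL_BUT_SELF clears the sender's
 * bit, and any other target is taken as a single CPU number.
 */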
void smp_mpic_message_pass(int target, int msg)
{
	/* make sure we're sending something that translates to an IPI */
	if ((unsigned int)msg > 3) {
		printk("SMP %d: smp_message_pass: unknown msg %d\n",
		       smp_processor_id(), msg);
		return;
	}
	switch (target) {
	case MSG_ALL:
		mpic_send_ipi(msg, 0xffffffff);
		break;
	case MSG_ALL_BUT_SELF:
		mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
		break;
	default:
		mpic_send_ipi(msg, 1 << target);
		break;
	}
}
#endif /* CONFIG_SMP */