Commit | Line | Data |
---|---|---|
14cf11af PM |
1 | /* |
2 | * arch/powerpc/kernel/mpic.c | |
3 | * | |
4 | * Driver for interrupt controllers following the OpenPIC standard, the | |
5 | * common implementation being IBM's MPIC. This driver also can deal | |
6 | * with various broken implementations of this HW. | |
7 | * | |
8 | * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. | |
9 | * | |
10 | * This file is subject to the terms and conditions of the GNU General Public | |
11 | * License. See the file COPYING in the main directory of this archive | |
12 | * for more details. | |
13 | */ | |
14 | ||
15 | #undef DEBUG | |
1beb6a7d BH |
16 | #undef DEBUG_IPI |
17 | #undef DEBUG_IRQ | |
18 | #undef DEBUG_LOW | |
14cf11af PM |
19 | |
20 | #include <linux/config.h> | |
21 | #include <linux/types.h> | |
22 | #include <linux/kernel.h> | |
23 | #include <linux/init.h> | |
24 | #include <linux/irq.h> | |
25 | #include <linux/smp.h> | |
26 | #include <linux/interrupt.h> | |
27 | #include <linux/bootmem.h> | |
28 | #include <linux/spinlock.h> | |
29 | #include <linux/pci.h> | |
30 | ||
31 | #include <asm/ptrace.h> | |
32 | #include <asm/signal.h> | |
33 | #include <asm/io.h> | |
34 | #include <asm/pgtable.h> | |
35 | #include <asm/irq.h> | |
36 | #include <asm/machdep.h> | |
37 | #include <asm/mpic.h> | |
38 | #include <asm/smp.h> | |
39 | ||
40 | #ifdef DEBUG | |
41 | #define DBG(fmt...) printk(fmt) | |
42 | #else | |
43 | #define DBG(fmt...) | |
44 | #endif | |
45 | ||
46 | static struct mpic *mpics; | |
47 | static struct mpic *mpic_primary; | |
48 | static DEFINE_SPINLOCK(mpic_lock); | |
49 | ||
c0c0d996 | 50 | #ifdef CONFIG_PPC32 /* XXX for now */ |
e40c7f02 AW |
51 | #ifdef CONFIG_IRQ_ALL_CPUS |
52 | #define distribute_irqs (1) | |
53 | #else | |
54 | #define distribute_irqs (0) | |
55 | #endif | |
c0c0d996 | 56 | #endif |
14cf11af PM |
57 | |
58 | /* | |
59 | * Register accessor functions | |
60 | */ | |
61 | ||
62 | ||
63 | static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base, | |
64 | unsigned int reg) | |
65 | { | |
66 | if (be) | |
67 | return in_be32(base + (reg >> 2)); | |
68 | else | |
69 | return in_le32(base + (reg >> 2)); | |
70 | } | |
71 | ||
72 | static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base, | |
73 | unsigned int reg, u32 value) | |
74 | { | |
75 | if (be) | |
76 | out_be32(base + (reg >> 2), value); | |
77 | else | |
78 | out_le32(base + (reg >> 2), value); | |
79 | } | |
80 | ||
81 | static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi) | |
82 | { | |
83 | unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0; | |
84 | unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10); | |
85 | ||
86 | if (mpic->flags & MPIC_BROKEN_IPI) | |
87 | be = !be; | |
88 | return _mpic_read(be, mpic->gregs, offset); | |
89 | } | |
90 | ||
91 | static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value) | |
92 | { | |
93 | unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10); | |
94 | ||
95 | _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value); | |
96 | } | |
97 | ||
98 | static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg) | |
99 | { | |
100 | unsigned int cpu = 0; | |
101 | ||
102 | if (mpic->flags & MPIC_PRIMARY) | |
103 | cpu = hard_smp_processor_id(); | |
104 | ||
105 | return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg); | |
106 | } | |
107 | ||
108 | static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value) | |
109 | { | |
110 | unsigned int cpu = 0; | |
111 | ||
112 | if (mpic->flags & MPIC_PRIMARY) | |
113 | cpu = hard_smp_processor_id(); | |
114 | ||
115 | _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value); | |
116 | } | |
117 | ||
118 | static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg) | |
119 | { | |
120 | unsigned int isu = src_no >> mpic->isu_shift; | |
121 | unsigned int idx = src_no & mpic->isu_mask; | |
122 | ||
123 | return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu], | |
124 | reg + (idx * MPIC_IRQ_STRIDE)); | |
125 | } | |
126 | ||
127 | static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no, | |
128 | unsigned int reg, u32 value) | |
129 | { | |
130 | unsigned int isu = src_no >> mpic->isu_shift; | |
131 | unsigned int idx = src_no & mpic->isu_mask; | |
132 | ||
133 | _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu], | |
134 | reg + (idx * MPIC_IRQ_STRIDE), value); | |
135 | } | |
136 | ||
137 | #define mpic_read(b,r) _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r)) | |
138 | #define mpic_write(b,r,v) _mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v)) | |
139 | #define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i)) | |
140 | #define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v)) | |
141 | #define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i)) | |
142 | #define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v)) | |
143 | #define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r)) | |
144 | #define mpic_irq_write(s,r,v) _mpic_irq_write(mpic,(s),(r),(v)) | |
145 | ||
146 | ||
147 | /* | |
148 | * Low level utility functions | |
149 | */ | |
150 | ||
151 | ||
152 | ||
153 | /* Check if we have one of those nice broken MPICs with a flipped endian on | |
154 | * reads from IPI registers | |
155 | */ | |
156 | static void __init mpic_test_broken_ipi(struct mpic *mpic) | |
157 | { | |
158 | u32 r; | |
159 | ||
160 | mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK); | |
161 | r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0); | |
162 | ||
163 | if (r == le32_to_cpu(MPIC_VECPRI_MASK)) { | |
164 | printk(KERN_INFO "mpic: Detected reversed IPI registers\n"); | |
165 | mpic->flags |= MPIC_BROKEN_IPI; | |
166 | } | |
167 | } | |
168 | ||
169 | #ifdef CONFIG_MPIC_BROKEN_U3 | |
170 | ||
171 | /* Test if an interrupt is sourced from HyperTransport (used on broken U3s) | |
172 | * to force the edge setting on the MPIC and do the ack workaround. | |
173 | */ | |
1beb6a7d | 174 | static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source) |
14cf11af | 175 | { |
1beb6a7d | 176 | if (source >= 128 || !mpic->fixups) |
14cf11af | 177 | return 0; |
1beb6a7d | 178 | return mpic->fixups[source].base != NULL; |
14cf11af PM |
179 | } |
180 | ||
c4b22f26 | 181 | |
1beb6a7d | 182 | static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source) |
14cf11af | 183 | { |
1beb6a7d | 184 | struct mpic_irq_fixup *fixup = &mpic->fixups[source]; |
14cf11af | 185 | |
1beb6a7d BH |
186 | if (fixup->applebase) { |
187 | unsigned int soff = (fixup->index >> 3) & ~3; | |
188 | unsigned int mask = 1U << (fixup->index & 0x1f); | |
189 | writel(mask, fixup->applebase + soff); | |
190 | } else { | |
191 | spin_lock(&mpic->fixup_lock); | |
192 | writeb(0x11 + 2 * fixup->index, fixup->base + 2); | |
193 | writel(fixup->data, fixup->base + 4); | |
194 | spin_unlock(&mpic->fixup_lock); | |
195 | } | |
14cf11af PM |
196 | } |
197 | ||
1beb6a7d BH |
198 | static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, |
199 | unsigned int irqflags) | |
200 | { | |
201 | struct mpic_irq_fixup *fixup = &mpic->fixups[source]; | |
202 | unsigned long flags; | |
203 | u32 tmp; | |
204 | ||
205 | if (fixup->base == NULL) | |
206 | return; | |
207 | ||
208 | DBG("startup_ht_interrupt(%u, %u) index: %d\n", | |
209 | source, irqflags, fixup->index); | |
210 | spin_lock_irqsave(&mpic->fixup_lock, flags); | |
211 | /* Enable and configure */ | |
212 | writeb(0x10 + 2 * fixup->index, fixup->base + 2); | |
213 | tmp = readl(fixup->base + 4); | |
214 | tmp &= ~(0x23U); | |
215 | if (irqflags & IRQ_LEVEL) | |
216 | tmp |= 0x22; | |
217 | writel(tmp, fixup->base + 4); | |
218 | spin_unlock_irqrestore(&mpic->fixup_lock, flags); | |
219 | } | |
220 | ||
221 | static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source, | |
222 | unsigned int irqflags) | |
223 | { | |
224 | struct mpic_irq_fixup *fixup = &mpic->fixups[source]; | |
225 | unsigned long flags; | |
226 | u32 tmp; | |
227 | ||
228 | if (fixup->base == NULL) | |
229 | return; | |
230 | ||
231 | DBG("shutdown_ht_interrupt(%u, %u)\n", source, irqflags); | |
232 | ||
233 | /* Disable */ | |
234 | spin_lock_irqsave(&mpic->fixup_lock, flags); | |
235 | writeb(0x10 + 2 * fixup->index, fixup->base + 2); | |
236 | tmp = readl(fixup->base + 4); | |
72b13819 | 237 | tmp |= 1; |
1beb6a7d BH |
238 | writel(tmp, fixup->base + 4); |
239 | spin_unlock_irqrestore(&mpic->fixup_lock, flags); | |
240 | } | |
14cf11af | 241 | |
1beb6a7d BH |
242 | static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase, |
243 | unsigned int devfn, u32 vdid) | |
14cf11af | 244 | { |
c4b22f26 | 245 | int i, irq, n; |
1beb6a7d | 246 | u8 __iomem *base; |
14cf11af | 247 | u32 tmp; |
c4b22f26 | 248 | u8 pos; |
14cf11af | 249 | |
1beb6a7d BH |
250 | for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0; |
251 | pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) { | |
252 | u8 id = readb(devbase + pos + PCI_CAP_LIST_ID); | |
253 | if (id == PCI_CAP_ID_HT_IRQCONF) { | |
c4b22f26 SB |
254 | id = readb(devbase + pos + 3); |
255 | if (id == 0x80) | |
256 | break; | |
257 | } | |
14cf11af | 258 | } |
c4b22f26 SB |
259 | if (pos == 0) |
260 | return; | |
261 | ||
1beb6a7d BH |
262 | base = devbase + pos; |
263 | writeb(0x01, base + 2); | |
264 | n = (readl(base + 4) >> 16) & 0xff; | |
14cf11af | 265 | |
1beb6a7d BH |
266 | printk(KERN_INFO "mpic: - HT:%02x.%x [0x%02x] vendor %04x device %04x" |
267 | " has %d irqs\n", | |
268 | devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1); | |
c4b22f26 SB |
269 | |
270 | for (i = 0; i <= n; i++) { | |
1beb6a7d BH |
271 | writeb(0x10 + 2 * i, base + 2); |
272 | tmp = readl(base + 4); | |
14cf11af | 273 | irq = (tmp >> 16) & 0xff; |
1beb6a7d BH |
274 | DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp); |
275 | /* mask it, will be unmasked later */ | |
276 | tmp |= 0x1; | |
277 | writel(tmp, base + 4); | |
278 | mpic->fixups[irq].index = i; | |
279 | mpic->fixups[irq].base = base; | |
280 | /* Apple HT PIC has a non-standard way of doing EOIs */ | |
281 | if ((vdid & 0xffff) == 0x106b) | |
282 | mpic->fixups[irq].applebase = devbase + 0x60; | |
283 | else | |
284 | mpic->fixups[irq].applebase = NULL; | |
285 | writeb(0x11 + 2 * i, base + 2); | |
286 | mpic->fixups[irq].data = readl(base + 4) | 0x80000000; | |
14cf11af PM |
287 | } |
288 | } | |
289 | ||
c4b22f26 | 290 | |
1beb6a7d | 291 | static void __init mpic_scan_ht_pics(struct mpic *mpic) |
14cf11af PM |
292 | { |
293 | unsigned int devfn; | |
294 | u8 __iomem *cfgspace; | |
295 | ||
1beb6a7d | 296 | printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n"); |
14cf11af PM |
297 | |
298 | /* Allocate fixups array */ | |
299 | mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup)); | |
300 | BUG_ON(mpic->fixups == NULL); | |
301 | memset(mpic->fixups, 0, 128 * sizeof(struct mpic_irq_fixup)); | |
302 | ||
303 | /* Init spinlock */ | |
304 | spin_lock_init(&mpic->fixup_lock); | |
305 | ||
c4b22f26 SB |
306 | /* Map U3 config space. We assume all IO-APICs are on the primary bus |
307 | * so we only need to map 64kB. | |
14cf11af | 308 | */ |
c4b22f26 | 309 | cfgspace = ioremap(0xf2000000, 0x10000); |
14cf11af PM |
310 | BUG_ON(cfgspace == NULL); |
311 | ||
1beb6a7d BH |
312 | /* Now we scan all slots. We do a very quick scan, we read the header |
313 | * type, vendor ID and device ID only, that's plenty enough | |
14cf11af | 314 | */ |
c4b22f26 | 315 | for (devfn = 0; devfn < 0x100; devfn++) { |
14cf11af PM |
316 | u8 __iomem *devbase = cfgspace + (devfn << 8); |
317 | u8 hdr_type = readb(devbase + PCI_HEADER_TYPE); | |
318 | u32 l = readl(devbase + PCI_VENDOR_ID); | |
1beb6a7d | 319 | u16 s; |
14cf11af PM |
320 | |
321 | DBG("devfn %x, l: %x\n", devfn, l); | |
322 | ||
323 | /* If no device, skip */ | |
324 | if (l == 0xffffffff || l == 0x00000000 || | |
325 | l == 0x0000ffff || l == 0xffff0000) | |
326 | goto next; | |
1beb6a7d BH |
327 | /* Check if it supports capability lists */ | |
328 | s = readw(devbase + PCI_STATUS); | |
329 | if (!(s & PCI_STATUS_CAP_LIST)) | |
330 | goto next; | |
14cf11af | 331 | |
1beb6a7d | 332 | mpic_scan_ht_pic(mpic, devbase, devfn, l); |
c4b22f26 | 333 | |
14cf11af PM |
334 | next: |
335 | /* next device, if function 0 */ | |
c4b22f26 | 336 | if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0) |
14cf11af PM |
337 | devfn += 7; |
338 | } | |
339 | } | |
340 | ||
341 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | |
342 | ||
343 | ||
344 | /* Find an mpic associated with a given linux interrupt */ | |
345 | static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi) | |
346 | { | |
347 | struct mpic *mpic = mpics; | |
348 | ||
349 | while(mpic) { | |
350 | /* search IPIs first since they may override the main interrupts */ | |
351 | if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) { | |
352 | if (is_ipi) | |
353 | *is_ipi = 1; | |
354 | return mpic; | |
355 | } | |
356 | if (irq >= mpic->irq_offset && | |
357 | irq < (mpic->irq_offset + mpic->irq_count)) { | |
358 | if (is_ipi) | |
359 | *is_ipi = 0; | |
360 | return mpic; | |
361 | } | |
362 | mpic = mpic -> next; | |
363 | } | |
364 | return NULL; | |
365 | } | |
366 | ||
367 | /* Convert a cpu mask from logical to physical cpu numbers. */ | |
368 | static inline u32 mpic_physmask(u32 cpumask) | |
369 | { | |
370 | int i; | |
371 | u32 mask = 0; | |
372 | ||
373 | for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1) | |
374 | mask |= (cpumask & 1) << get_hard_smp_processor_id(i); | |
375 | return mask; | |
376 | } | |
377 | ||
378 | #ifdef CONFIG_SMP | |
379 | /* Get the mpic structure from the IPI number */ | |
380 | static inline struct mpic * mpic_from_ipi(unsigned int ipi) | |
381 | { | |
d1bef4ed | 382 | return container_of(irq_desc[ipi].chip, struct mpic, hc_ipi); |
14cf11af PM |
383 | } |
384 | #endif | |
385 | ||
386 | /* Get the mpic structure from the irq number */ | |
387 | static inline struct mpic * mpic_from_irq(unsigned int irq) | |
388 | { | |
d1bef4ed | 389 | return container_of(irq_desc[irq].chip, struct mpic, hc_irq); |
14cf11af PM |
390 | } |
391 | ||
392 | /* Send an EOI */ | |
393 | static inline void mpic_eoi(struct mpic *mpic) | |
394 | { | |
395 | mpic_cpu_write(MPIC_CPU_EOI, 0); | |
396 | (void)mpic_cpu_read(MPIC_CPU_WHOAMI); | |
397 | } | |
398 | ||
399 | #ifdef CONFIG_SMP | |
400 | static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) | |
401 | { | |
402 | struct mpic *mpic = dev_id; | |
403 | ||
404 | smp_message_recv(irq - mpic->ipi_offset, regs); | |
405 | return IRQ_HANDLED; | |
406 | } | |
407 | #endif /* CONFIG_SMP */ | |
408 | ||
409 | /* | |
410 | * Linux descriptor level callbacks | |
411 | */ | |
412 | ||
413 | ||
414 | static void mpic_enable_irq(unsigned int irq) | |
415 | { | |
416 | unsigned int loops = 100000; | |
417 | struct mpic *mpic = mpic_from_irq(irq); | |
418 | unsigned int src = irq - mpic->irq_offset; | |
419 | ||
bd561c79 | 420 | DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src); |
14cf11af PM |
421 | |
422 | mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, | |
e5356640 BH |
423 | mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & |
424 | ~MPIC_VECPRI_MASK); | |
14cf11af PM |
425 | |
426 | /* make sure mask gets to controller before we return to user */ | |
427 | do { | |
428 | if (!loops--) { | |
429 | printk(KERN_ERR "mpic_enable_irq timeout\n"); | |
430 | break; | |
431 | } | |
432 | } while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK); | |
1beb6a7d BH |
433 | |
434 | #ifdef CONFIG_MPIC_BROKEN_U3 | |
435 | if (mpic->flags & MPIC_BROKEN_U3) { | |
436 | unsigned int src = irq - mpic->irq_offset; | |
437 | if (mpic_is_ht_interrupt(mpic, src) && | |
438 | (irq_desc[irq].status & IRQ_LEVEL)) | |
439 | mpic_ht_end_irq(mpic, src); | |
440 | } | |
441 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | |
442 | } | |
443 | ||
444 | static unsigned int mpic_startup_irq(unsigned int irq) | |
445 | { | |
446 | #ifdef CONFIG_MPIC_BROKEN_U3 | |
447 | struct mpic *mpic = mpic_from_irq(irq); | |
448 | unsigned int src = irq - mpic->irq_offset; | |
72b13819 SB |
449 | #endif /* CONFIG_MPIC_BROKEN_U3 */ |
450 | ||
451 | mpic_enable_irq(irq); | |
1beb6a7d | 452 | |
72b13819 | 453 | #ifdef CONFIG_MPIC_BROKEN_U3 |
1beb6a7d BH |
454 | if (mpic_is_ht_interrupt(mpic, src)) |
455 | mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status); | |
1beb6a7d BH |
456 | #endif /* CONFIG_MPIC_BROKEN_U3 */ |
457 | ||
1beb6a7d | 458 | return 0; |
14cf11af PM |
459 | } |
460 | ||
461 | static void mpic_disable_irq(unsigned int irq) | |
462 | { | |
463 | unsigned int loops = 100000; | |
464 | struct mpic *mpic = mpic_from_irq(irq); | |
465 | unsigned int src = irq - mpic->irq_offset; | |
466 | ||
467 | DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src); | |
468 | ||
469 | mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, | |
e5356640 BH |
470 | mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) | |
471 | MPIC_VECPRI_MASK); | |
14cf11af PM |
472 | |
473 | /* make sure mask gets to controller before we return to user */ | |
474 | do { | |
475 | if (!loops--) { | |
476 | printk(KERN_ERR "mpic_enable_irq timeout\n"); | |
477 | break; | |
478 | } | |
479 | } while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK)); | |
480 | } | |
481 | ||
1beb6a7d BH |
482 | static void mpic_shutdown_irq(unsigned int irq) |
483 | { | |
484 | #ifdef CONFIG_MPIC_BROKEN_U3 | |
485 | struct mpic *mpic = mpic_from_irq(irq); | |
486 | unsigned int src = irq - mpic->irq_offset; | |
487 | ||
488 | if (mpic_is_ht_interrupt(mpic, src)) | |
489 | mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status); | |
490 | ||
491 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | |
492 | ||
493 | mpic_disable_irq(irq); | |
494 | } | |
495 | ||
14cf11af PM |
496 | static void mpic_end_irq(unsigned int irq) |
497 | { | |
498 | struct mpic *mpic = mpic_from_irq(irq); | |
499 | ||
1beb6a7d | 500 | #ifdef DEBUG_IRQ |
14cf11af | 501 | DBG("%s: end_irq: %d\n", mpic->name, irq); |
1beb6a7d | 502 | #endif |
14cf11af PM |
503 | /* We always EOI on end_irq() even for edge interrupts since that |
504 | * should only lower the priority, the MPIC should have properly | |
505 | * latched another edge interrupt coming in anyway | |
506 | */ | |
507 | ||
508 | #ifdef CONFIG_MPIC_BROKEN_U3 | |
509 | if (mpic->flags & MPIC_BROKEN_U3) { | |
510 | unsigned int src = irq - mpic->irq_offset; | |
1beb6a7d BH |
511 | if (mpic_is_ht_interrupt(mpic, src) && |
512 | (irq_desc[irq].status & IRQ_LEVEL)) | |
513 | mpic_ht_end_irq(mpic, src); | |
14cf11af PM |
514 | } |
515 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | |
516 | ||
517 | mpic_eoi(mpic); | |
518 | } | |
519 | ||
520 | #ifdef CONFIG_SMP | |
521 | ||
522 | static void mpic_enable_ipi(unsigned int irq) | |
523 | { | |
524 | struct mpic *mpic = mpic_from_ipi(irq); | |
525 | unsigned int src = irq - mpic->ipi_offset; | |
526 | ||
527 | DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src); | |
528 | mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); | |
529 | } | |
530 | ||
531 | static void mpic_disable_ipi(unsigned int irq) | |
532 | { | |
533 | /* NEVER disable an IPI... that's just plain wrong! */ | |
534 | } | |
535 | ||
536 | static void mpic_end_ipi(unsigned int irq) | |
537 | { | |
538 | struct mpic *mpic = mpic_from_ipi(irq); | |
539 | ||
540 | /* | |
541 | * IPIs are marked IRQ_PER_CPU. This has the side effect of | |
542 | * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from | |
543 | * applying to them. We EOI them late to avoid re-entering. | |
544 | * We mark IPI's with SA_INTERRUPT as they must run with | |
545 | * irqs disabled. | |
546 | */ | |
547 | mpic_eoi(mpic); | |
548 | } | |
549 | ||
550 | #endif /* CONFIG_SMP */ | |
551 | ||
552 | static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) | |
553 | { | |
554 | struct mpic *mpic = mpic_from_irq(irq); | |
555 | ||
556 | cpumask_t tmp; | |
557 | ||
558 | cpus_and(tmp, cpumask, cpu_online_map); | |
559 | ||
560 | mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION, | |
561 | mpic_physmask(cpus_addr(tmp)[0])); | |
562 | } | |
563 | ||
564 | ||
565 | /* | |
566 | * Exported functions | |
567 | */ | |
568 | ||
569 | ||
570 | struct mpic * __init mpic_alloc(unsigned long phys_addr, | |
571 | unsigned int flags, | |
572 | unsigned int isu_size, | |
573 | unsigned int irq_offset, | |
574 | unsigned int irq_count, | |
575 | unsigned int ipi_offset, | |
576 | unsigned char *senses, | |
577 | unsigned int senses_count, | |
578 | const char *name) | |
579 | { | |
580 | struct mpic *mpic; | |
581 | u32 reg; | |
582 | const char *vers; | |
583 | int i; | |
584 | ||
585 | mpic = alloc_bootmem(sizeof(struct mpic)); | |
586 | if (mpic == NULL) | |
587 | return NULL; | |
588 | ||
589 | ||
590 | memset(mpic, 0, sizeof(struct mpic)); | |
591 | mpic->name = name; | |
592 | ||
593 | mpic->hc_irq.typename = name; | |
1beb6a7d BH |
594 | mpic->hc_irq.startup = mpic_startup_irq; |
595 | mpic->hc_irq.shutdown = mpic_shutdown_irq; | |
14cf11af PM |
596 | mpic->hc_irq.enable = mpic_enable_irq; |
597 | mpic->hc_irq.disable = mpic_disable_irq; | |
598 | mpic->hc_irq.end = mpic_end_irq; | |
599 | if (flags & MPIC_PRIMARY) | |
600 | mpic->hc_irq.set_affinity = mpic_set_affinity; | |
601 | #ifdef CONFIG_SMP | |
602 | mpic->hc_ipi.typename = name; | |
603 | mpic->hc_ipi.enable = mpic_enable_ipi; | |
604 | mpic->hc_ipi.disable = mpic_disable_ipi; | |
605 | mpic->hc_ipi.end = mpic_end_ipi; | |
606 | #endif /* CONFIG_SMP */ | |
607 | ||
608 | mpic->flags = flags; | |
609 | mpic->isu_size = isu_size; | |
610 | mpic->irq_offset = irq_offset; | |
611 | mpic->irq_count = irq_count; | |
612 | mpic->ipi_offset = ipi_offset; | |
613 | mpic->num_sources = 0; /* so far */ | |
614 | mpic->senses = senses; | |
615 | mpic->senses_count = senses_count; | |
616 | ||
617 | /* Map the global registers */ | |
618 | mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000); | |
bd561c79 | 619 | mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2); |
14cf11af PM |
620 | BUG_ON(mpic->gregs == NULL); |
621 | ||
622 | /* Reset */ | |
623 | if (flags & MPIC_WANTS_RESET) { | |
624 | mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0, | |
625 | mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0) | |
626 | | MPIC_GREG_GCONF_RESET); | |
627 | while( mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0) | |
628 | & MPIC_GREG_GCONF_RESET) | |
629 | mb(); | |
630 | } | |
631 | ||
632 | /* Read feature register, calculate num CPUs and, for non-ISU | |
633 | * MPICs, num sources as well. On ISU MPICs, sources are counted | |
634 | * as ISUs are added | |
635 | */ | |
636 | reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0); | |
637 | mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK) | |
638 | >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1; | |
639 | if (isu_size == 0) | |
640 | mpic->num_sources = ((reg & MPIC_GREG_FEATURE_LAST_SRC_MASK) | |
641 | >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1; | |
642 | ||
643 | /* Map the per-CPU registers */ | |
644 | for (i = 0; i < mpic->num_cpus; i++) { | |
645 | mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE + | |
646 | i * MPIC_CPU_STRIDE, 0x1000); | |
647 | BUG_ON(mpic->cpuregs[i] == NULL); | |
648 | } | |
649 | ||
650 | /* Initialize main ISU if none provided */ | |
651 | if (mpic->isu_size == 0) { | |
652 | mpic->isu_size = mpic->num_sources; | |
653 | mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE, | |
654 | MPIC_IRQ_STRIDE * mpic->isu_size); | |
655 | BUG_ON(mpic->isus[0] == NULL); | |
656 | } | |
657 | mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); | |
658 | mpic->isu_mask = (1 << mpic->isu_shift) - 1; | |
659 | ||
660 | /* Display version */ | |
661 | switch (reg & MPIC_GREG_FEATURE_VERSION_MASK) { | |
662 | case 1: | |
663 | vers = "1.0"; | |
664 | break; | |
665 | case 2: | |
666 | vers = "1.2"; | |
667 | break; | |
668 | case 3: | |
669 | vers = "1.3"; | |
670 | break; | |
671 | default: | |
672 | vers = "<unknown>"; | |
673 | break; | |
674 | } | |
675 | printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n", | |
676 | name, vers, phys_addr, mpic->num_cpus); | |
677 | printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size, | |
678 | mpic->isu_shift, mpic->isu_mask); | |
679 | ||
680 | mpic->next = mpics; | |
681 | mpics = mpic; | |
682 | ||
683 | if (flags & MPIC_PRIMARY) | |
684 | mpic_primary = mpic; | |
685 | ||
686 | return mpic; | |
687 | } | |
688 | ||
689 | void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, | |
690 | unsigned long phys_addr) | |
691 | { | |
692 | unsigned int isu_first = isu_num * mpic->isu_size; | |
693 | ||
694 | BUG_ON(isu_num >= MPIC_MAX_ISU); | |
695 | ||
696 | mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size); | |
697 | if ((isu_first + mpic->isu_size) > mpic->num_sources) | |
698 | mpic->num_sources = isu_first + mpic->isu_size; | |
699 | } | |
700 | ||
701 | void __init mpic_setup_cascade(unsigned int irq, mpic_cascade_t handler, | |
702 | void *data) | |
703 | { | |
704 | struct mpic *mpic = mpic_find(irq, NULL); | |
705 | unsigned long flags; | |
706 | ||
707 | /* Synchronization here is a bit dodgy, so don't try to replace cascade | |
708 | * interrupts on the fly too often ... but normally it's set up at boot. | |
709 | */ | |
710 | spin_lock_irqsave(&mpic_lock, flags); | |
711 | if (mpic->cascade) | |
712 | mpic_disable_irq(mpic->cascade_vec + mpic->irq_offset); | |
713 | mpic->cascade = NULL; | |
714 | wmb(); | |
715 | mpic->cascade_vec = irq - mpic->irq_offset; | |
716 | mpic->cascade_data = data; | |
717 | wmb(); | |
718 | mpic->cascade = handler; | |
719 | mpic_enable_irq(irq); | |
720 | spin_unlock_irqrestore(&mpic_lock, flags); | |
721 | } | |
722 | ||
723 | void __init mpic_init(struct mpic *mpic) | |
724 | { | |
725 | int i; | |
726 | ||
727 | BUG_ON(mpic->num_sources == 0); | |
728 | ||
729 | printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources); | |
730 | ||
731 | /* Set current processor priority to max */ | |
732 | mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf); | |
733 | ||
734 | /* Initialize timers: just disable them all */ | |
735 | for (i = 0; i < 4; i++) { | |
736 | mpic_write(mpic->tmregs, | |
737 | i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0); | |
738 | mpic_write(mpic->tmregs, | |
739 | i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI, | |
740 | MPIC_VECPRI_MASK | | |
741 | (MPIC_VEC_TIMER_0 + i)); | |
742 | } | |
743 | ||
744 | /* Initialize IPIs to our reserved vectors and mark them disabled for now */ | |
745 | mpic_test_broken_ipi(mpic); | |
746 | for (i = 0; i < 4; i++) { | |
747 | mpic_ipi_write(i, | |
748 | MPIC_VECPRI_MASK | | |
749 | (10 << MPIC_VECPRI_PRIORITY_SHIFT) | | |
750 | (MPIC_VEC_IPI_0 + i)); | |
751 | #ifdef CONFIG_SMP | |
752 | if (!(mpic->flags & MPIC_PRIMARY)) | |
753 | continue; | |
754 | irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU; | |
d1bef4ed | 755 | irq_desc[mpic->ipi_offset+i].chip = &mpic->hc_ipi; |
14cf11af PM |
756 | #endif /* CONFIG_SMP */ |
757 | } | |
758 | ||
759 | /* Initialize interrupt sources */ | |
760 | if (mpic->irq_count == 0) | |
761 | mpic->irq_count = mpic->num_sources; | |
762 | ||
763 | #ifdef CONFIG_MPIC_BROKEN_U3 | |
1beb6a7d | 764 | /* Do the HT PIC fixups on U3 broken mpic */ |
14cf11af PM |
765 | DBG("MPIC flags: %x\n", mpic->flags); |
766 | if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY)) | |
1beb6a7d | 767 | mpic_scan_ht_pics(mpic); |
14cf11af PM |
768 | #endif /* CONFIG_MPIC_BROKEN_U3 */ |
769 | ||
770 | for (i = 0; i < mpic->num_sources; i++) { | |
771 | /* start with vector = source number, and masked */ | |
772 | u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT); | |
773 | int level = 0; | |
774 | ||
775 | /* if it's an IPI, we skip it */ | |
776 | if ((mpic->irq_offset + i) >= (mpic->ipi_offset + i) && | |
777 | (mpic->irq_offset + i) < (mpic->ipi_offset + i + 4)) | |
778 | continue; | |
779 | ||
780 | /* do senses munging */ | |
781 | if (mpic->senses && i < mpic->senses_count) { | |
782 | if (mpic->senses[i] & IRQ_SENSE_LEVEL) | |
783 | vecpri |= MPIC_VECPRI_SENSE_LEVEL; | |
784 | if (mpic->senses[i] & IRQ_POLARITY_POSITIVE) | |
785 | vecpri |= MPIC_VECPRI_POLARITY_POSITIVE; | |
786 | } else | |
787 | vecpri |= MPIC_VECPRI_SENSE_LEVEL; | |
788 | ||
789 | /* remember if it was a level interrupt */ | |
790 | level = (vecpri & MPIC_VECPRI_SENSE_LEVEL); | |
791 | ||
792 | /* deal with broken U3 */ | |
793 | if (mpic->flags & MPIC_BROKEN_U3) { | |
794 | #ifdef CONFIG_MPIC_BROKEN_U3 | |
795 | if (mpic_is_ht_interrupt(mpic, i)) { | |
796 | vecpri &= ~(MPIC_VECPRI_SENSE_MASK | | |
797 | MPIC_VECPRI_POLARITY_MASK); | |
798 | vecpri |= MPIC_VECPRI_POLARITY_POSITIVE; | |
799 | } | |
800 | #else | |
801 | printk(KERN_ERR "mpic: BROKEN_U3 set, but CONFIG doesn't match\n"); | |
802 | #endif | |
803 | } | |
804 | ||
805 | DBG("setup source %d, vecpri: %08x, level: %d\n", i, vecpri, | |
806 | (level != 0)); | |
807 | ||
808 | /* init hw */ | |
809 | mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri); | |
810 | mpic_irq_write(i, MPIC_IRQ_DESTINATION, | |
811 | 1 << hard_smp_processor_id()); | |
812 | ||
813 | /* init linux descriptors */ | |
814 | if (i < mpic->irq_count) { | |
815 | irq_desc[mpic->irq_offset+i].status = level ? IRQ_LEVEL : 0; | |
d1bef4ed | 816 | irq_desc[mpic->irq_offset+i].chip = &mpic->hc_irq; |
14cf11af PM |
817 | } |
818 | } | |
819 | ||
820 | /* Init spurious vector */ | |
821 | mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS); | |
822 | ||
823 | /* Disable 8259 passthrough */ | |
824 | mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0, | |
825 | mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0) | |
826 | | MPIC_GREG_GCONF_8259_PTHROU_DIS); | |
827 | ||
828 | /* Set current processor priority to 0 */ | |
829 | mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0); | |
830 | } | |
831 | ||
868ea0c9 MG |
832 | void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio) |
833 | { | |
834 | u32 v; | |
835 | ||
836 | v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1); | |
837 | v &= ~MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK; | |
838 | v |= MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(clock_ratio); | |
839 | mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v); | |
840 | } | |
14cf11af | 841 | |
868ea0c9 MG |
842 | void __init mpic_set_serial_int(struct mpic *mpic, int enable) |
843 | { | |
844 | u32 v; | |
845 | ||
846 | v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1); | |
847 | if (enable) | |
848 | v |= MPIC_GREG_GLOBAL_CONF_1_SIE; | |
849 | else | |
850 | v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE; | |
851 | mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v); | |
852 | } | |
14cf11af PM |
853 | |
854 | void mpic_irq_set_priority(unsigned int irq, unsigned int pri) | |
855 | { | |
856 | int is_ipi; | |
857 | struct mpic *mpic = mpic_find(irq, &is_ipi); | |
858 | unsigned long flags; | |
859 | u32 reg; | |
860 | ||
861 | spin_lock_irqsave(&mpic_lock, flags); | |
862 | if (is_ipi) { | |
e5356640 BH |
863 | reg = mpic_ipi_read(irq - mpic->ipi_offset) & |
864 | ~MPIC_VECPRI_PRIORITY_MASK; | |
14cf11af PM |
865 | mpic_ipi_write(irq - mpic->ipi_offset, |
866 | reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); | |
867 | } else { | |
e5356640 BH |
868 | reg = mpic_irq_read(irq - mpic->irq_offset,MPIC_IRQ_VECTOR_PRI) |
869 | & ~MPIC_VECPRI_PRIORITY_MASK; | |
14cf11af PM |
870 | mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI, |
871 | reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); | |
872 | } | |
873 | spin_unlock_irqrestore(&mpic_lock, flags); | |
874 | } | |
875 | ||
876 | unsigned int mpic_irq_get_priority(unsigned int irq) | |
877 | { | |
878 | int is_ipi; | |
879 | struct mpic *mpic = mpic_find(irq, &is_ipi); | |
880 | unsigned long flags; | |
881 | u32 reg; | |
882 | ||
883 | spin_lock_irqsave(&mpic_lock, flags); | |
884 | if (is_ipi) | |
885 | reg = mpic_ipi_read(irq - mpic->ipi_offset); | |
886 | else | |
887 | reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI); | |
888 | spin_unlock_irqrestore(&mpic_lock, flags); | |
889 | return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT; | |
890 | } | |
891 | ||
/*
 * Per-CPU MPIC initialization for the calling processor: optionally
 * add this CPU to every source's destination mask, then drop the
 * CPU's current task priority to 0 so it accepts interrupts.
 * Compiles to a no-op on non-SMP builds.
 */
void mpic_setup_this_cpu(void)
{
#ifdef CONFIG_SMP
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	/* Bit for this CPU in the per-source destination masks */
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());

	spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we want intrs. default affinity is 0xffffffff
	 * until changed via /proc. That's how it's done on x86. If we want
	 * it differently, then we should make sure we also change the default
	 * values of irq_affinity in irq.c.
	 */
	if (distribute_irqs) {
		for (i = 0; i < mpic->num_sources ; i++)
			mpic_irq_write(i, MPIC_IRQ_DESTINATION,
				mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk);
	}

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);

	spin_unlock_irqrestore(&mpic_lock, flags);
#endif /* CONFIG_SMP */
}
923 | ||
/*
 * Return the calling CPU's current task priority register value from
 * the primary MPIC.
 * NOTE(review): "mpic" looks unused, but mpic_cpu_read() is presumably
 * a macro that references this local — confirm against the accessor
 * definitions before removing it.
 */
int mpic_cpu_get_priority(void)
{
	struct mpic *mpic = mpic_primary;

	return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
}
930 | ||
/*
 * Set the calling CPU's current task priority register on the primary
 * MPIC, masking the requested value to the valid priority range first.
 * NOTE(review): "mpic" looks unused, but mpic_cpu_write() is presumably
 * a macro that references this local — confirm before removing it.
 */
void mpic_cpu_set_priority(int prio)
{
	struct mpic *mpic = mpic_primary;

	prio &= MPIC_CPU_TASKPRI_MASK;
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio);
}
938 | ||
/*
 * XXX: someone who knows mpic should check this.
 * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
 * or can we reset the mpic in the new kernel?
 */
/*
 * Undo mpic_setup_this_cpu() for the calling processor (e.g. on CPU
 * offline or kexec): remove this CPU from every source's destination
 * mask, then raise its task priority to the maximum (0xf) so pending
 * interrupts are held off.
 * NOTE(review): the "secondary" parameter is not used in this body —
 * confirm whether callers rely on it before changing the signature.
 */
void mpic_teardown_this_cpu(int secondary)
{
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	/* Bit for this CPU in the per-source destination masks */
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
	spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we don't want intrs. */
	for (i = 0; i < mpic->num_sources ; i++)
		mpic_irq_write(i, MPIC_IRQ_DESTINATION,
			mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);

	spin_unlock_irqrestore(&mpic_lock, flags);
}
966 | ||
967 | ||
/*
 * Send IPI number "ipi_no" to the CPUs in "cpu_mask" via the primary
 * MPIC's dispatch registers.  The mask is first restricted to online
 * CPUs, then translated by mpic_physmask() before being written.
 */
void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

#ifdef DEBUG_IPI
	DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
#endif

	/* IPI dispatch registers are spaced 0x10 apart */
	mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10,
		       mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
}
981 | ||
/*
 * Read and acknowledge one pending interrupt from the given MPIC.
 *
 * Reading the INTACK register yields the vector of the pending source.
 * Three cases are handled: the cascade vector (delegated to the
 * registered cascade handler, then EOI'd here), the spurious vector
 * (returns -1), and normal sources/IPIs (translated to a linux irq
 * number by adding the controller's irq or ipi offset).
 */
int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
{
	u32 irq;

	irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
#ifdef DEBUG_LOW
	DBG("%s: get_one_irq(): %d\n", mpic->name, irq);
#endif
	if (mpic->cascade && irq == mpic->cascade_vec) {
#ifdef DEBUG_LOW
		DBG("%s: cascading ...\n", mpic->name);
#endif
		/* Cascade handler decodes its own controller; we still
		 * owe the EOI for the cascade source on this MPIC. */
		irq = mpic->cascade(regs, mpic->cascade_data);
		mpic_eoi(mpic);
		return irq;
	}
	if (unlikely(irq == MPIC_VEC_SPURRIOUS))
		return -1;
	if (irq < MPIC_VEC_IPI_0) {
#ifdef DEBUG_IRQ
		DBG("%s: irq %d\n", mpic->name, irq + mpic->irq_offset);
#endif
		return irq + mpic->irq_offset;
	}
#ifdef DEBUG_IPI
	DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0);
#endif
	return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset;
}
1011 | ||
/*
 * Fetch one pending interrupt from the primary MPIC.  Returns a linux
 * irq number, or -1 for a spurious interrupt (see mpic_get_one_irq()).
 */
int mpic_get_irq(struct pt_regs *regs)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

	return mpic_get_one_irq(mpic, regs);
}
1020 | ||
1021 | ||
1022 | #ifdef CONFIG_SMP | |
1023 | void mpic_request_ipis(void) | |
1024 | { | |
1025 | struct mpic *mpic = mpic_primary; | |
1026 | ||
1027 | BUG_ON(mpic == NULL); | |
1028 | ||
1029 | printk("requesting IPIs ... \n"); | |
1030 | ||
1031 | /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */ | |
1032 | request_irq(mpic->ipi_offset+0, mpic_ipi_action, SA_INTERRUPT, | |
1033 | "IPI0 (call function)", mpic); | |
1034 | request_irq(mpic->ipi_offset+1, mpic_ipi_action, SA_INTERRUPT, | |
1035 | "IPI1 (reschedule)", mpic); | |
1036 | request_irq(mpic->ipi_offset+2, mpic_ipi_action, SA_INTERRUPT, | |
1037 | "IPI2 (unused)", mpic); | |
1038 | request_irq(mpic->ipi_offset+3, mpic_ipi_action, SA_INTERRUPT, | |
1039 | "IPI3 (debugger break)", mpic); | |
1040 | ||
1041 | printk("IPIs requested... \n"); | |
1042 | } | |
a9c59264 PM |
1043 | |
1044 | void smp_mpic_message_pass(int target, int msg) | |
1045 | { | |
1046 | /* make sure we're sending something that translates to an IPI */ | |
1047 | if ((unsigned int)msg > 3) { | |
1048 | printk("SMP %d: smp_message_pass: unknown msg %d\n", | |
1049 | smp_processor_id(), msg); | |
1050 | return; | |
1051 | } | |
1052 | switch (target) { | |
1053 | case MSG_ALL: | |
1054 | mpic_send_ipi(msg, 0xffffffff); | |
1055 | break; | |
1056 | case MSG_ALL_BUT_SELF: | |
1057 | mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id())); | |
1058 | break; | |
1059 | default: | |
1060 | mpic_send_ipi(msg, 1 << target); | |
1061 | break; | |
1062 | } | |
1063 | } | |
14cf11af | 1064 | #endif /* CONFIG_SMP */ |