arch/blackfin/mach-common/ints-priority.c
/*
 * Set up the interrupt priorities
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *                2003 Bas Vermeulen <bas@buyways.nl>
 *                2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
 *           2000-2001 Lineo, Inc. D. Jeff Dionne <jeff@lineo.ca>
 *                1999 D. Jeff Dionne <jeff@uclinux.org>
 *                1996 Roman Zippel
 *
 * Licensed under the GPL-2
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <asm/delay.h>
#ifdef CONFIG_IPIPE
#include <linux/ipipe.h>
#endif
#include <asm/traps.h>
#include <asm/blackfin.h>
#include <asm/gpio.h>
#include <asm/irq_handler.h>
#include <asm/dpmc.h>

/*
 * NOTES:
 * - we have separated the physical hardware interrupt from the
 *   levels that the Linux kernel sees (see the description in irq.h)
 */

#ifndef CONFIG_SMP
/* Initialize this to an actual value to force it into the .data
 * section so that we know it is properly initialized at entry into
 * the kernel but before bss is initialized to zero (which is where
 * it would live otherwise).  The 0x1f magic represents the IRQs we
 * cannot actually mask out in hardware.
 */
unsigned long bfin_irq_flags = 0x1f;
EXPORT_SYMBOL(bfin_irq_flags);
#endif

#ifdef CONFIG_PM
unsigned long bfin_sic_iwr[3];	/* Up to 3 SIC_IWRx registers */
unsigned vr_wakeup;
#endif

#ifndef SEC_GCTL
static struct ivgx {
	/* irq number for request_irq, available in mach-bf5xx/irq.h */
	unsigned int irqno;
	/* corresponding bit in the SIC_ISR register */
	unsigned int isrflag;
} ivg_table[NR_PERI_INTS];

static struct ivg_slice {
	/* position of first irq in ivg_table for given ivg */
	struct ivgx *ifirst;
	struct ivgx *istop;
} ivg7_13[IVG13 - IVG7 + 1];


/*
 * Search SIC_IAR and fill tables with the irq values
 * and their positions in the SIC_ISR register.
 */
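/*
 * Each 32-bit SIC_IARx register packs eight 4-bit fields, one per peripheral
 * interrupt; a field holds that interrupt's IVG assignment as an offset from
 * IVG7, which is what the (irqn & 7) * 4 shift below decodes.
 */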
static void __init search_IAR(void)
{
	unsigned ivg, irq_pos = 0;
	for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
		int irqN;

		ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];

		for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
			int irqn;
			u32 iar =
				bfin_read32((unsigned long *)SIC_IAR0 +
#if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
	defined(CONFIG_BF538) || defined(CONFIG_BF539)
				((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
#else
				(irqN >> 3)
#endif
				);
			for (irqn = irqN; irqn < irqN + 4; ++irqn) {
				int iar_shift = (irqn & 7) * 4;
				if (ivg == (0xf & (iar >> iar_shift))) {
					ivg_table[irq_pos].irqno = IVG7 + irqn;
					ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
					ivg7_13[ivg].istop++;
					irq_pos++;
				}
			}
		}
	}
}
#endif

/*
 * This is for core internal IRQs
 */
void bfin_ack_noop(struct irq_data *d)
{
	/* Dummy function. */
}

static void bfin_core_mask_irq(struct irq_data *d)
{
	bfin_irq_flags &= ~(1 << d->irq);
	if (!hard_irqs_disabled())
		hard_local_irq_enable();
}

static void bfin_core_unmask_irq(struct irq_data *d)
{
	bfin_irq_flags |= 1 << d->irq;
	/*
	 * If interrupts are enabled, IMASK must contain the same value
	 * as bfin_irq_flags.  Make sure that invariant holds.  If interrupts
	 * are currently disabled we need not do anything; one of the
	 * callers will take care of setting IMASK to the proper value
	 * when reenabling interrupts.
	 * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
	 * what we need.
	 */
	if (!hard_irqs_disabled())
		hard_local_irq_enable();
	return;
}

#ifndef SEC_GCTL
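/*
 * On SIC-based parts a peripheral interrupt is identified by its system IRQ
 * number: BFIN_SYSIRQ(irq) / 32 selects the SIC_IMASKx bank and
 * BFIN_SYSIRQ(irq) % 32 the bit inside that bank.
 */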
void bfin_internal_mask_irq(unsigned int irq)
{
	unsigned long flags = hard_local_irq_save();
#ifdef SIC_IMASK0
	unsigned mask_bank = BFIN_SYSIRQ(irq) / 32;
	unsigned mask_bit = BFIN_SYSIRQ(irq) % 32;
	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
			~(1 << mask_bit));
# if defined(CONFIG_SMP) || defined(CONFIG_ICC)
	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
			~(1 << mask_bit));
# endif
#else
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
			~(1 << BFIN_SYSIRQ(irq)));
#endif /* end of SIC_IMASK0 */
	hard_local_irq_restore(flags);
}

static void bfin_internal_mask_irq_chip(struct irq_data *d)
{
	bfin_internal_mask_irq(d->irq);
}

#ifdef CONFIG_SMP
void bfin_internal_unmask_irq_affinity(unsigned int irq,
		const struct cpumask *affinity)
#else
void bfin_internal_unmask_irq(unsigned int irq)
#endif
{
	unsigned long flags = hard_local_irq_save();

#ifdef SIC_IMASK0
	unsigned mask_bank = BFIN_SYSIRQ(irq) / 32;
	unsigned mask_bit = BFIN_SYSIRQ(irq) % 32;
# ifdef CONFIG_SMP
	if (cpumask_test_cpu(0, affinity))
# endif
		bfin_write_SIC_IMASK(mask_bank,
				bfin_read_SIC_IMASK(mask_bank) |
				(1 << mask_bit));
# ifdef CONFIG_SMP
	if (cpumask_test_cpu(1, affinity))
		bfin_write_SICB_IMASK(mask_bank,
				bfin_read_SICB_IMASK(mask_bank) |
				(1 << mask_bit));
# endif
#else
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
			(1 << BFIN_SYSIRQ(irq)));
#endif
	hard_local_irq_restore(flags);
}

#ifdef CONFIG_SMP
static void bfin_internal_unmask_irq_chip(struct irq_data *d)
{
	bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
}

static int bfin_internal_set_affinity(struct irq_data *d,
		const struct cpumask *mask, bool force)
{
	bfin_internal_mask_irq(d->irq);
	bfin_internal_unmask_irq_affinity(d->irq, mask);

	return 0;
}
#else
static void bfin_internal_unmask_irq_chip(struct irq_data *d)
{
	bfin_internal_unmask_irq(d->irq);
}
#endif

#if defined(CONFIG_PM)
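/*
 * Record in bfin_sic_iwr[] which SIC_IWRx bit corresponds to this IRQ and,
 * for the few sources that have one, the matching wakeup flag in vr_wakeup
 * (presumably a VR_CTL wakeup enable), so they can later be programmed as
 * wakeup sources.
 */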
int bfin_internal_set_wake(unsigned int irq, unsigned int state)
{
	u32 bank, bit, wakeup = 0;
	unsigned long flags;
	bank = BFIN_SYSIRQ(irq) / 32;
	bit = BFIN_SYSIRQ(irq) % 32;

	switch (irq) {
#ifdef IRQ_RTC
	case IRQ_RTC:
		wakeup |= WAKE;
		break;
#endif
#ifdef IRQ_CAN0_RX
	case IRQ_CAN0_RX:
		wakeup |= CANWE;
		break;
#endif
#ifdef IRQ_CAN1_RX
	case IRQ_CAN1_RX:
		wakeup |= CANWE;
		break;
#endif
#ifdef IRQ_USB_INT0
	case IRQ_USB_INT0:
		wakeup |= USBWE;
		break;
#endif
#ifdef CONFIG_BF54x
	case IRQ_CNT:
		wakeup |= ROTWE;
		break;
#endif
	default:
		break;
	}

	flags = hard_local_irq_save();

	if (state) {
		bfin_sic_iwr[bank] |= (1 << bit);
		vr_wakeup |= wakeup;

	} else {
		bfin_sic_iwr[bank] &= ~(1 << bit);
		vr_wakeup &= ~wakeup;
	}

	hard_local_irq_restore(flags);

	return 0;
}

static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
{
	return bfin_internal_set_wake(d->irq, state);
}
#else
inline int bfin_internal_set_wake(unsigned int irq, unsigned int state)
{
	return 0;
}
# define bfin_internal_set_wake_chip NULL
#endif

#else /* SEC_GCTL */
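/*
 * When SEC_GCTL is defined the part uses the System Event Controller (SEC)
 * rather than the SIC: a source is acknowledged by writing its ID to
 * SEC_CSID, completed by writing it to SEC_END, and enabled/prioritised
 * through its SEC_SCTL register.
 */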
static void bfin_sec_preflow_handler(struct irq_data *d)
{
	unsigned long flags = hard_local_irq_save();
	unsigned int sid = BFIN_SYSIRQ(d->irq);

	bfin_write_SEC_SCI(0, SEC_CSID, sid);

	hard_local_irq_restore(flags);
}

static void bfin_sec_mask_ack_irq(struct irq_data *d)
{
	unsigned long flags = hard_local_irq_save();
	unsigned int sid = BFIN_SYSIRQ(d->irq);

	bfin_write_SEC_SCI(0, SEC_CSID, sid);

	hard_local_irq_restore(flags);
}

static void bfin_sec_unmask_irq(struct irq_data *d)
{
	unsigned long flags = hard_local_irq_save();
	unsigned int sid = BFIN_SYSIRQ(d->irq);

	bfin_write32(SEC_END, sid);

	hard_local_irq_restore(flags);
}

static void bfin_sec_enable_ssi(unsigned int sid)
{
	unsigned long flags = hard_local_irq_save();
	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);

	reg_sctl |= SEC_SCTL_SRC_EN;
	bfin_write_SEC_SCTL(sid, reg_sctl);

	hard_local_irq_restore(flags);
}

static void bfin_sec_disable_ssi(unsigned int sid)
{
	unsigned long flags = hard_local_irq_save();
	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);

	reg_sctl &= ((uint32_t)~SEC_SCTL_SRC_EN);
	bfin_write_SEC_SCTL(sid, reg_sctl);

	hard_local_irq_restore(flags);
}

static void bfin_sec_set_ssi_coreid(unsigned int sid, unsigned int coreid)
{
	unsigned long flags = hard_local_irq_save();
	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);

	reg_sctl &= ((uint32_t)~SEC_SCTL_CTG);
	bfin_write_SEC_SCTL(sid, reg_sctl | ((coreid << 20) & SEC_SCTL_CTG));

	hard_local_irq_restore(flags);
}

static void bfin_sec_enable_sci(unsigned int sid)
{
	unsigned long flags = hard_local_irq_save();
	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);

	if (sid == BFIN_SYSIRQ(IRQ_WATCH0))
		reg_sctl |= SEC_SCTL_FAULT_EN;
	else
		reg_sctl |= SEC_SCTL_INT_EN;
	bfin_write_SEC_SCTL(sid, reg_sctl);

	hard_local_irq_restore(flags);
}

static void bfin_sec_disable_sci(unsigned int sid)
{
	unsigned long flags = hard_local_irq_save();
	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);

	reg_sctl &= ((uint32_t)~SEC_SCTL_INT_EN);
	bfin_write_SEC_SCTL(sid, reg_sctl);

	hard_local_irq_restore(flags);
}

static void bfin_sec_enable(struct irq_data *d)
{
	unsigned long flags = hard_local_irq_save();
	unsigned int sid = BFIN_SYSIRQ(d->irq);

	bfin_sec_enable_sci(sid);
	bfin_sec_enable_ssi(sid);

	hard_local_irq_restore(flags);
}

static void bfin_sec_disable(struct irq_data *d)
{
	unsigned long flags = hard_local_irq_save();
	unsigned int sid = BFIN_SYSIRQ(d->irq);

	bfin_sec_disable_sci(sid);
	bfin_sec_disable_ssi(sid);

	hard_local_irq_restore(flags);
}

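/*
 * Program the core's interrupt level mapping (SEC_CPLVL) and the priority
 * field of every source's SEC_SCTL register.
 */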
static void bfin_sec_set_priority(unsigned int sec_int_levels, u8 *sec_int_priority)
{
	unsigned long flags = hard_local_irq_save();
	uint32_t reg_sctl;
	int i;

	bfin_write_SEC_SCI(0, SEC_CPLVL, sec_int_levels);

	for (i = 0; i < SYS_IRQS - BFIN_IRQ(0); i++) {
		reg_sctl = bfin_read_SEC_SCTL(i) & ~SEC_SCTL_PRIO;
		reg_sctl |= sec_int_priority[i] << SEC_SCTL_PRIO_OFFSET;
		bfin_write_SEC_SCTL(i, reg_sctl);
	}

	hard_local_irq_restore(flags);
}

void bfin_sec_raise_irq(unsigned int irq)
{
	unsigned long flags = hard_local_irq_save();
	unsigned int sid = BFIN_SYSIRQ(irq);

	bfin_write32(SEC_RAISE, sid);

	hard_local_irq_restore(flags);
}

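/*
 * Route the software-driven interrupt sources (SIDs 34-37) alternately to
 * core 0 and core 1, and enable the two sources that target core 1.
 */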
static void init_software_driven_irq(void)
{
	bfin_sec_set_ssi_coreid(34, 0);
	bfin_sec_set_ssi_coreid(35, 1);

	bfin_sec_enable_sci(35);
	bfin_sec_enable_ssi(35);
	bfin_sec_set_ssi_coreid(36, 0);
	bfin_sec_set_ssi_coreid(37, 1);
	bfin_sec_enable_sci(37);
	bfin_sec_enable_ssi(37);
}

void handle_sec_sfi_fault(uint32_t gstat)
{

}

void handle_sec_sci_fault(uint32_t gstat)
{
	uint32_t core_id;
	uint32_t cstat;

	core_id = gstat & SEC_GSTAT_SCI;
	cstat = bfin_read_SEC_SCI(core_id, SEC_CSTAT);
	if (cstat & SEC_CSTAT_ERR) {
		switch (cstat & SEC_CSTAT_ERRC) {
		case SEC_CSTAT_ACKERR:
			printk(KERN_DEBUG "sec ack err\n");
			break;
		default:
			printk(KERN_DEBUG "sec sci unknown err\n");
		}
	}

}

void handle_sec_ssi_fault(uint32_t gstat)
{
	uint32_t sid;
	uint32_t sstat;

	sid = gstat & SEC_GSTAT_SID;
	sstat = bfin_read_SEC_SSTAT(sid);

}

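/* Decode SEC_GSTAT and dispatch to the fault, SCI or SSI specific handler. */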
void handle_sec_fault(uint32_t sec_gstat)
{
	if (sec_gstat & SEC_GSTAT_ERR) {

		switch (sec_gstat & SEC_GSTAT_ERRC) {
		case 0:
			handle_sec_sfi_fault(sec_gstat);
			break;
		case SEC_GSTAT_SCIERR:
			handle_sec_sci_fault(sec_gstat);
			break;
		case SEC_GSTAT_SSIERR:
			handle_sec_ssi_fault(sec_gstat);
			break;
		}

	}
}

static struct irqaction bfin_fault_irq = {
	.name = "Blackfin fault",
};

static irqreturn_t bfin_fault_routine(int irq, void *data)
{
	struct pt_regs *fp = get_irq_regs();

	switch (irq) {
	case IRQ_C0_DBL_FAULT:
		double_fault_c(fp);
		break;
	case IRQ_C0_HW_ERR:
		dump_bfin_process(fp);
		dump_bfin_mem(fp);
		show_regs(fp);
		printk(KERN_NOTICE "Kernel Stack\n");
		show_stack(current, NULL);
		print_modules();
		panic("Core 0 hardware error");
		break;
	case IRQ_C0_NMI_L1_PARITY_ERR:
		panic("Core 0 NMI L1 parity error");
		break;
	case IRQ_SEC_ERR:
		pr_err("SEC error\n");
		handle_sec_fault(bfin_read32(SEC_GSTAT));
		break;
	default:
		panic("Unknown fault %d", irq);
	}

	return IRQ_HANDLED;
}
#endif /* SEC_GCTL */

static struct irq_chip bfin_core_irqchip = {
	.name = "CORE",
	.irq_mask = bfin_core_mask_irq,
	.irq_unmask = bfin_core_unmask_irq,
};

#ifndef SEC_GCTL
static struct irq_chip bfin_internal_irqchip = {
	.name = "INTN",
	.irq_mask = bfin_internal_mask_irq_chip,
	.irq_unmask = bfin_internal_unmask_irq_chip,
	.irq_disable = bfin_internal_mask_irq_chip,
	.irq_enable = bfin_internal_unmask_irq_chip,
#ifdef CONFIG_SMP
	.irq_set_affinity = bfin_internal_set_affinity,
#endif
	.irq_set_wake = bfin_internal_set_wake_chip,
};
#else
static struct irq_chip bfin_sec_irqchip = {
	.name = "SEC",
	.irq_mask_ack = bfin_sec_mask_ack_irq,
	.irq_mask = bfin_sec_mask_ack_irq,
	.irq_unmask = bfin_sec_unmask_irq,
	.irq_eoi = bfin_sec_unmask_irq,
	.irq_disable = bfin_sec_disable,
	.irq_enable = bfin_sec_enable,
};
#endif

void bfin_handle_irq(unsigned irq)
{
#ifdef CONFIG_IPIPE
	struct pt_regs regs;	/* Contents not used. */
	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, &regs);
	ipipe_trace_irq_exit(irq);
#else /* !CONFIG_IPIPE */
	generic_handle_irq(irq);
#endif /* !CONFIG_IPIPE */
}

#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
static int mac_stat_int_mask;

static void bfin_mac_status_ack_irq(unsigned int irq)
{
	switch (irq) {
	case IRQ_MAC_MMCINT:
		bfin_write_EMAC_MMC_TIRQS(
			bfin_read_EMAC_MMC_TIRQE() &
			bfin_read_EMAC_MMC_TIRQS());
		bfin_write_EMAC_MMC_RIRQS(
			bfin_read_EMAC_MMC_RIRQE() &
			bfin_read_EMAC_MMC_RIRQS());
		break;
	case IRQ_MAC_RXFSINT:
		bfin_write_EMAC_RX_STKY(
			bfin_read_EMAC_RX_IRQE() &
			bfin_read_EMAC_RX_STKY());
		break;
	case IRQ_MAC_TXFSINT:
		bfin_write_EMAC_TX_STKY(
			bfin_read_EMAC_TX_IRQE() &
			bfin_read_EMAC_TX_STKY());
		break;
	case IRQ_MAC_WAKEDET:
		bfin_write_EMAC_WKUP_CTL(
			bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
		break;
	default:
		/* These bits are W1C */
		bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
		break;
	}
}

static void bfin_mac_status_mask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

	mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
#ifdef BF537_FAMILY
	switch (irq) {
	case IRQ_MAC_PHYINT:
		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
		break;
	default:
		break;
	}
#else
	if (!mac_stat_int_mask)
		bfin_internal_mask_irq(IRQ_MAC_ERROR);
#endif
	bfin_mac_status_ack_irq(irq);
}

static void bfin_mac_status_unmask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

#ifdef BF537_FAMILY
	switch (irq) {
	case IRQ_MAC_PHYINT:
		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
		break;
	default:
		break;
	}
#else
	if (!mac_stat_int_mask)
		bfin_internal_unmask_irq(IRQ_MAC_ERROR);
#endif
	mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
}

#ifdef CONFIG_PM
int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
{
#ifdef BF537_FAMILY
	return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
#else
	return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
#endif
}
#else
# define bfin_mac_status_set_wake NULL
#endif

static struct irq_chip bfin_mac_status_irqchip = {
	.name = "MACST",
	.irq_mask = bfin_mac_status_mask_irq,
	.irq_unmask = bfin_mac_status_unmask_irq,
	.irq_set_wake = bfin_mac_status_set_wake,
};

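/*
 * Find the first pending EMAC status source in EMAC_SYSTAT and either hand
 * it to the normal IRQ path or, if it is masked, just acknowledge it.
 */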
void bfin_demux_mac_status_irq(unsigned int int_err_irq,
			       struct irq_desc *inta_desc)
{
	int i, irq = 0;
	u32 status = bfin_read_EMAC_SYSTAT();

	for (i = 0; i <= (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
		if (status & (1L << i)) {
			irq = IRQ_MAC_PHYINT + i;
			break;
		}

	if (irq) {
		if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
			bfin_handle_irq(irq);
		} else {
			bfin_mac_status_ack_irq(irq);
			pr_debug("IRQ %d:"
				 " MASKED MAC ERROR INTERRUPT ASSERTED\n",
				 irq);
		}
	} else
		printk(KERN_ERR
		       "%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
		       " INTERRUPT ASSERTED BUT NO SOURCE FOUND"
		       "(EMAC_SYSTAT=0x%X)\n",
		       __func__, __FILE__, __LINE__, status);
}
#endif

static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
{
#ifdef CONFIG_IPIPE
	handle = handle_level_irq;
#endif
	__irq_set_handler_locked(irq, handle);
}

#ifdef CONFIG_GPIO_ADI

static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);

static void bfin_gpio_ack_irq(struct irq_data *d)
{
	/* AFAIK ack_irq, in case mask_ack is provided,
	 * gets only called for edge sense irqs
	 */
	set_gpio_data(irq_to_gpio(d->irq), 0);
}

static void bfin_gpio_mask_ack_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;
	u32 gpionr = irq_to_gpio(irq);

	if (!irqd_is_level_type(d))
		set_gpio_data(gpionr, 0);

	set_gpio_maska(gpionr, 0);
}

static void bfin_gpio_mask_irq(struct irq_data *d)
{
	set_gpio_maska(irq_to_gpio(d->irq), 0);
}

static void bfin_gpio_unmask_irq(struct irq_data *d)
{
	set_gpio_maska(irq_to_gpio(d->irq), 1);
}

static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
{
	u32 gpionr = irq_to_gpio(d->irq);

	if (__test_and_set_bit(gpionr, gpio_enabled))
		bfin_gpio_irq_prepare(gpionr);

	bfin_gpio_unmask_irq(d);

	return 0;
}

static void bfin_gpio_irq_shutdown(struct irq_data *d)
{
	u32 gpionr = irq_to_gpio(d->irq);

	bfin_gpio_mask_irq(d);
	__clear_bit(gpionr, gpio_enabled);
	bfin_gpio_irq_free(gpionr);
}

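/*
 * Program the GPIO trigger for the requested type: INEN enables the input,
 * EDGE selects edge vs. level sensitivity, POLAR selects falling/low vs.
 * rising/high, and BOTH selects both-edge operation.
 */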
static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = d->irq;
	int ret;
	char buf[16];
	u32 gpionr = irq_to_gpio(irq);

	if (type == IRQ_TYPE_PROBE) {
		/* only probe unenabled GPIO interrupt lines */
		if (test_bit(gpionr, gpio_enabled))
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {

		snprintf(buf, 16, "gpio-irq%d", irq);
		ret = bfin_gpio_irq_request(gpionr, buf);
		if (ret)
			return ret;

		if (__test_and_set_bit(gpionr, gpio_enabled))
			bfin_gpio_irq_prepare(gpionr);

	} else {
		__clear_bit(gpionr, gpio_enabled);
		return 0;
	}

	set_gpio_inen(gpionr, 0);
	set_gpio_dir(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		set_gpio_both(gpionr, 1);
	else
		set_gpio_both(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
		set_gpio_polar(gpionr, 1);	/* low or falling edge denoted by one */
	else
		set_gpio_polar(gpionr, 0);	/* high or rising edge denoted by zero */

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		set_gpio_edge(gpionr, 1);
		set_gpio_inen(gpionr, 1);
		set_gpio_data(gpionr, 0);

	} else {
		set_gpio_edge(gpionr, 0);
		set_gpio_inen(gpionr, 1);
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		bfin_set_irq_handler(irq, handle_edge_irq);
	else
		bfin_set_irq_handler(irq, handle_level_irq);

	return 0;
}

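/*
 * Dispatch every GPIO in this port whose interrupt is both pending and
 * unmasked.
 */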
static void bfin_demux_gpio_block(unsigned int irq)
{
	unsigned int gpio, mask;

	gpio = irq_to_gpio(irq);
	mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);

	while (mask) {
		if (mask & 1)
			bfin_handle_irq(irq);
		irq++;
		mask >>= 1;
	}
}

void bfin_demux_gpio_irq(unsigned int inta_irq,
			 struct irq_desc *desc)
{
	unsigned int irq;

	switch (inta_irq) {
#if defined(BF537_FAMILY)
	case IRQ_PF_INTA_PG_INTA:
		bfin_demux_gpio_block(IRQ_PF0);
		irq = IRQ_PG0;
		break;
	case IRQ_PH_INTA_MAC_RX:
		irq = IRQ_PH0;
		break;
#elif defined(BF533_FAMILY)
	case IRQ_PROG_INTA:
		irq = IRQ_PF0;
		break;
#elif defined(BF538_FAMILY)
	case IRQ_PORTF_INTA:
		irq = IRQ_PF0;
		break;
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
	case IRQ_PORTF_INTA:
		irq = IRQ_PF0;
		break;
	case IRQ_PORTG_INTA:
		irq = IRQ_PG0;
		break;
	case IRQ_PORTH_INTA:
		irq = IRQ_PH0;
		break;
#elif defined(CONFIG_BF561)
	case IRQ_PROG0_INTA:
		irq = IRQ_PF0;
		break;
	case IRQ_PROG1_INTA:
		irq = IRQ_PF16;
		break;
	case IRQ_PROG2_INTA:
		irq = IRQ_PF32;
		break;
#endif
	default:
		BUG();
		return;
	}

	bfin_demux_gpio_block(irq);
}

#ifdef CONFIG_PM

static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
{
	return bfin_gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
}

#else

# define bfin_gpio_set_wake NULL

#endif

static struct irq_chip bfin_gpio_irqchip = {
	.name = "GPIO",
	.irq_ack = bfin_gpio_ack_irq,
	.irq_mask = bfin_gpio_mask_irq,
	.irq_mask_ack = bfin_gpio_mask_ack_irq,
	.irq_unmask = bfin_gpio_unmask_irq,
	.irq_disable = bfin_gpio_mask_irq,
	.irq_enable = bfin_gpio_unmask_irq,
	.irq_set_type = bfin_gpio_irq_type,
	.irq_startup = bfin_gpio_irq_startup,
	.irq_shutdown = bfin_gpio_irq_shutdown,
	.irq_set_wake = bfin_gpio_set_wake,
};

#endif

#ifdef CONFIG_PM

#ifdef SEC_GCTL
static u32 save_pint_sec_ctl[NR_PINT_SYS_IRQS];

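/*
 * Save the SEC source control registers of the PINT blocks across suspend;
 * resume first re-enables the SEC itself and then restores them.
 */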
static int sec_suspend(void)
{
	u32 bank;

	for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++)
		save_pint_sec_ctl[bank] = bfin_read_SEC_SCTL(bank + BFIN_SYSIRQ(IRQ_PINT0));
	return 0;
}

static void sec_resume(void)
{
	u32 bank;

	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
	udelay(100);
	bfin_write_SEC_GCTL(SEC_GCTL_EN);
	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);

	for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++)
		bfin_write_SEC_SCTL(bank + BFIN_SYSIRQ(IRQ_PINT0), save_pint_sec_ctl[bank]);
}

static struct syscore_ops sec_pm_syscore_ops = {
	.suspend = sec_suspend,
	.resume = sec_resume,
};
#endif

#endif

void init_exception_vectors(void)
{
	/* cannot program in software:
	 * evt0 - emulation (jtag)
	 * evt1 - reset
	 */
	bfin_write_EVT2(evt_nmi);
	bfin_write_EVT3(trap);
	bfin_write_EVT5(evt_ivhw);
	bfin_write_EVT6(evt_timer);
	bfin_write_EVT7(evt_evt7);
	bfin_write_EVT8(evt_evt8);
	bfin_write_EVT9(evt_evt9);
	bfin_write_EVT10(evt_evt10);
	bfin_write_EVT11(evt_evt11);
	bfin_write_EVT12(evt_evt12);
	bfin_write_EVT13(evt_evt13);
	bfin_write_EVT14(evt_evt14);
	bfin_write_EVT15(evt_system_call);
	CSYNC();
}

#ifndef SEC_GCTL
/*
 * This function should be called during kernel startup to initialize
 * the BFin IRQ handling routines.
 */

int __init init_arch_irq(void)
{
	int irq;
	unsigned long ilat = 0;

	/* Disable all the peripheral intrs - page 4-29 HW Ref manual */
#ifdef SIC_IMASK0
	bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
	bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
# ifdef SIC_IMASK2
	bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
# endif
# if defined(CONFIG_SMP) || defined(CONFIG_ICC)
	bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
	bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
# endif
#else
	bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
#endif

	local_irq_disable();

	for (irq = 0; irq <= SYS_IRQS; irq++) {
		if (irq <= IRQ_CORETMR)
			irq_set_chip(irq, &bfin_core_irqchip);
		else
			irq_set_chip(irq, &bfin_internal_irqchip);

		switch (irq) {
#if !BFIN_GPIO_PINT
#if defined(BF537_FAMILY)
		case IRQ_PH_INTA_MAC_RX:
		case IRQ_PF_INTA_PG_INTA:
#elif defined(BF533_FAMILY)
		case IRQ_PROG_INTA:
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
		case IRQ_PORTF_INTA:
		case IRQ_PORTG_INTA:
		case IRQ_PORTH_INTA:
#elif defined(CONFIG_BF561)
		case IRQ_PROG0_INTA:
		case IRQ_PROG1_INTA:
		case IRQ_PROG2_INTA:
#elif defined(BF538_FAMILY)
		case IRQ_PORTF_INTA:
#endif
			irq_set_chained_handler(irq, bfin_demux_gpio_irq);
			break;
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
		case IRQ_MAC_ERROR:
			irq_set_chained_handler(irq,
						bfin_demux_mac_status_irq);
			break;
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_ICC)
		case IRQ_SUPPLE_0:
		case IRQ_SUPPLE_1:
			irq_set_handler(irq, handle_percpu_irq);
			break;
#endif

#ifdef CONFIG_TICKSOURCE_CORETMR
		case IRQ_CORETMR:
# ifdef CONFIG_SMP
			irq_set_handler(irq, handle_percpu_irq);
# else
			irq_set_handler(irq, handle_simple_irq);
# endif
			break;
#endif

#ifdef CONFIG_TICKSOURCE_GPTMR0
		case IRQ_TIMER0:
			irq_set_handler(irq, handle_simple_irq);
			break;
#endif

		default:
#ifdef CONFIG_IPIPE
			irq_set_handler(irq, handle_level_irq);
#else
			irq_set_handler(irq, handle_simple_irq);
#endif
			break;
		}
	}

	init_mach_irq();

#if (defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
	for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
		irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
					 handle_level_irq);
#endif
	/* if configured as edge, then will be changed to do_edge_IRQ */
#ifdef CONFIG_GPIO_ADI
	for (irq = GPIO_IRQ_BASE;
		irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
		irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
					 handle_level_irq);
#endif
	bfin_write_IMASK(0);
	CSYNC();
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
	/* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx,
	 * local_irq_enable()
	 */
	program_IAR();
	/* Therefore it's better to setup IARs before interrupts enabled */
	search_IAR();

	/* Enable interrupts IVG7-15 */
	bfin_irq_flags |= IMASK_IVG15 |
		IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
		IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;

	/* This implicitly covers ANOMALY_05000171
	 * Boot-ROM code modifies SICA_IWRx wakeup registers
	 */
#ifdef SIC_IWR0
	bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
# ifdef SIC_IWR1
	/* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
	 * will screw up the bootrom as it relies on MDMA0/1 waking it
	 * up from IDLE instructions.  See this report for more info:
	 * http://blackfin.uclinux.org/gf/tracker/4323
	 */
	if (ANOMALY_05000435)
		bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
	else
		bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
# endif
# ifdef SIC_IWR2
	bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
# endif
#else
	bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif
	return 0;
}

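/*
 * Map a core event vector (IVG7..IVG13) back to the Linux IRQ number of the
 * pending peripheral by scanning the SIC status registers against their
 * masks; the core timer vector is special-cased.
 */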
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
static int vec_to_irq(int vec)
{
	struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
	struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
	unsigned long sic_status[3];
	if (likely(vec == EVT_IVTMR_P))
		return IRQ_CORETMR;
#ifdef SIC_ISR
	sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
#else
	if (smp_processor_id()) {
# ifdef SICB_ISR0
		/* This will be optimized out in UP mode. */
		sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
		sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
# endif
	} else {
		sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
		sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
	}
#endif
#ifdef SIC_ISR2
	sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
#endif

	for (;; ivg++) {
		if (ivg >= ivg_stop)
			return -1;
#ifdef SIC_ISR
		if (sic_status[0] & ivg->isrflag)
#else
		if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
#endif
			return ivg->irqno;
	}
}

#else /* SEC_GCTL */

/*
 * This function should be called during kernel startup to initialize
 * the BFin IRQ handling routines.
 */

int __init init_arch_irq(void)
{
	int irq;
	unsigned long ilat = 0;

	bfin_write_SEC_GCTL(SEC_GCTL_RESET);

	local_irq_disable();

	for (irq = 0; irq <= SYS_IRQS; irq++) {
		if (irq <= IRQ_CORETMR) {
			irq_set_chip_and_handler(irq, &bfin_core_irqchip,
				handle_simple_irq);
#if defined(CONFIG_TICKSOURCE_CORETMR) && defined(CONFIG_SMP)
			if (irq == IRQ_CORETMR)
				irq_set_handler(irq, handle_percpu_irq);
#endif
		} else if (irq >= BFIN_IRQ(34) && irq <= BFIN_IRQ(37)) {
			irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
				handle_percpu_irq);
		} else {
			irq_set_chip(irq, &bfin_sec_irqchip);
			irq_set_handler(irq, handle_fasteoi_irq);
			__irq_set_preflow_handler(irq, bfin_sec_preflow_handler);
		}
	}

	bfin_write_IMASK(0);
	CSYNC();
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");

	bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);

	/* Enable interrupts IVG7-15 */
	bfin_irq_flags |= IMASK_IVG15 |
		IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
		IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;

	bfin_write_SEC_FCTL(SEC_FCTL_EN | SEC_FCTL_SYSRST_EN | SEC_FCTL_FLTIN_EN);
	bfin_sec_enable_sci(BFIN_SYSIRQ(IRQ_WATCH0));
	bfin_sec_enable_ssi(BFIN_SYSIRQ(IRQ_WATCH0));
	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
	udelay(100);
	bfin_write_SEC_GCTL(SEC_GCTL_EN);
	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
	bfin_write_SEC_SCI(1, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);

	init_software_driven_irq();

#ifdef CONFIG_PM
	register_syscore_ops(&sec_pm_syscore_ops);
#endif

	bfin_fault_irq.handler = bfin_fault_routine;
#ifdef CONFIG_L1_PARITY_CHECK
	setup_irq(IRQ_C0_NMI_L1_PARITY_ERR, &bfin_fault_irq);
#endif
	setup_irq(IRQ_C0_DBL_FAULT, &bfin_fault_irq);
	setup_irq(IRQ_SEC_ERR, &bfin_fault_irq);

	return 0;
}

#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
static int vec_to_irq(int vec)
{
	if (likely(vec == EVT_IVTMR_P))
		return IRQ_CORETMR;

	return BFIN_IRQ(bfin_read_SEC_SCI(0, SEC_CSID));
}
#endif /* SEC_GCTL */

#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
void do_irq(int vec, struct pt_regs *fp)
{
	int irq = vec_to_irq(vec);
	if (irq == -1)
		return;
	asm_do_IRQ(irq, fp);
}

#ifdef CONFIG_IPIPE

int __ipipe_get_irq_priority(unsigned irq)
{
	int ient, prio;

	if (irq <= IRQ_CORETMR)
		return irq;

#ifdef SEC_GCTL
	if (irq >= BFIN_IRQ(0))
		return IVG11;
#else
	for (ient = 0; ient < NR_PERI_INTS; ient++) {
		struct ivgx *ivg = ivg_table + ient;
		if (ivg->irqno == irq) {
			for (prio = 0; prio <= IVG13-IVG7; prio++) {
				if (ivg7_13[prio].ifirst <= ivg &&
				    ivg7_13[prio].istop > ivg)
					return IVG7 + prio;
			}
		}
	}
#endif

	return IVG15;
}

/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
	struct ipipe_domain *this_domain = __ipipe_current_domain;
	int irq, s = 0;

	irq = vec_to_irq(vec);
	if (irq == -1)
		return 0;

	if (irq == IRQ_SYSTMR) {
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
		bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
#endif
		/* This is basically what we need from the register frame. */
		__this_cpu_write(__ipipe_tick_regs.ipend, regs->ipend);
		__this_cpu_write(__ipipe_tick_regs.pc, regs->pc);
		if (this_domain != ipipe_root_domain)
			__this_cpu_and(__ipipe_tick_regs.ipend, ~0x10);
		else
			__this_cpu_or(__ipipe_tick_regs.ipend, 0x10);
	}

	/*
	 * We don't want Linux interrupt handlers to run at the
	 * current core priority level (i.e. < EVT15), since this
	 * might delay other interrupts handled by a high priority
	 * domain. Here is what we do instead:
	 *
	 * - we raise the SYNCDEFER bit to prevent
	 * __ipipe_handle_irq() to sync the pipeline for the root
	 * stage for the incoming interrupt. Upon return, that IRQ is
	 * pending in the interrupt log.
	 *
	 * - we raise the TIF_IRQ_SYNC bit for the current thread, so
	 * that _schedule_and_signal_from_int will eventually sync the
	 * pipeline from EVT15.
	 */
	if (this_domain == ipipe_root_domain) {
		s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
		barrier();
	}

	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, regs);
	ipipe_trace_irq_exit(irq);

	if (user_mode(regs) &&
	    !ipipe_test_foreign_stack() &&
	    (current->ipipe_flags & PF_EVTRET) != 0) {
		/*
		 * Testing for user_regs() does NOT fully eliminate
		 * foreign stack contexts, because of the forged
		 * interrupt returns we do through
		 * __ipipe_call_irqtail. In that case, we might have
		 * preempted a foreign stack context in a high
		 * priority domain, with a single interrupt level now
		 * pending after the irqtail unwinding is done. In
		 * which case user_mode() is now true, and the event
		 * gets dispatched spuriously.
		 */
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (this_domain == ipipe_root_domain) {
		set_thread_flag(TIF_IRQ_SYNC);
		if (!s) {
			__clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
			return !test_bit(IPIPE_STALL_FLAG, &p->status);
		}
	}

	return 0;
}

#endif /* CONFIG_IPIPE */