/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cm.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>
unsigned int gic_present;

struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

struct gic_irq_spec {
	enum {
		GIC_DEVICE,
		GIC_IPI
	} type;

	union {
		struct cpumask *ipimask;
		unsigned int hwirq;
	};
};
static unsigned long __gic_base_addr;

static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_dev_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static void __gic_irq_dispatch(void);
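/*
 * MMIO accessors: the GIC may sit behind either a 32-bit or a 64-bit
 * Coherence Manager. gic_read()/gic_write() pick the access width at
 * run time based on mips_cm_is64, so the rest of the driver can treat
 * registers as native-word-sized.
 */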
static inline u32 gic_read32(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}

static inline u64 gic_read64(unsigned int reg)
{
	return __raw_readq(gic_base + reg);
}

static inline unsigned long gic_read(unsigned int reg)
{
	if (!mips_cm_is64)
		return gic_read32(reg);

	return gic_read64(reg);
}

static inline void gic_write32(unsigned int reg, u32 val)
{
	return __raw_writel(val, gic_base + reg);
}

static inline void gic_write64(unsigned int reg, u64 val)
{
	return __raw_writeq(val, gic_base + reg);
}

static inline void gic_write(unsigned int reg, unsigned long val)
{
	if (!mips_cm_is64)
		return gic_write32(reg, (u32)val);

	return gic_write64(reg, (u64)val);
}
static inline void gic_update_bits(unsigned int reg, unsigned long mask,
				   unsigned long val)
{
	unsigned long regval;

	regval = gic_read(reg);
	regval &= ~mask;
	regval |= val;
	gic_write(reg, regval);
}

static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)pol << GIC_INTR_BIT(intr));
}

static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)trig << GIC_INTR_BIT(intr));
}

static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1ul << GIC_INTR_BIT(intr),
			(unsigned long)dual << GIC_INTR_BIT(intr));
}

static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}
#ifdef CONFIG_CLKSRC_MIPS_GIC
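/*
 * On a 32-bit register interface the 64-bit count register cannot be
 * read atomically, so gic_read_count() samples the high word, then the
 * low word, then the high word again, and retries if the high word
 * changed in between (i.e. the low word wrapped mid-read).
 */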
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));

	do {
		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((cycle_t) hi) << 32) + lo;
}

unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);

	return bits;
}

void gic_write_compare(cycle_t cnt)
{
	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
			    (int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
			    (int)(cnt & 0xffffffff));
	}
}

void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), mips_cm_vp_id(cpu));

	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
			    (int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
			    (int)(cnt & 0xffffffff));
	}

	local_irq_restore(flags);
}

cycle_t gic_read_compare(void)
{
	unsigned int hi, lo;

	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));

	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((cycle_t) hi) << 32) + lo;
}

void gic_start_count(void)
{
	u32 gicconfig;

	/* Start the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

void gic_stop_count(void)
{
	u32 gicconfig;

	/* Stop the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

#endif
unsigned gic_read_local_vp_id(void)
{
	unsigned long ident;

	ident = gic_read(GIC_REG(VPE_LOCAL, GIC_VP_IDENT));
	return ident & GIC_VP_IDENT_VCNUM_MSK;
}
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		return true;
	}
}
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}
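/*
 * IPIs are ordinary shared interrupts routed to the target CPU; writing
 * the interrupt number to the WEDGE register with the set bit raises an
 * edge on that interrupt.
 */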
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
}
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}
int gic_get_usm_range(struct resource *gic_usm_res)
{
	if (!gic_present)
		return -1;

	gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS;
	gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1);

	return 0;
}
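/*
 * Shared interrupt dispatch: read the pending and mask register banks,
 * AND them with this CPU's pcpu_mask (interrupts routed here), then
 * handle each remaining set bit. When called from the chained handler
 * the virq is handled directly; from the EIC vector it goes through
 * do_IRQ().
 */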
static void gic_handle_shared_int(bool chained)
{
	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;

		if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64)
			continue;

		pending[i] |= (u64)gic_read(pending_reg) << 32;
		intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;
	}

	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	intr = find_first_bit(pending, gic_shared_intrs);
	while (intr != gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(pending, intr, 1);
		intr = find_first_bit(pending, gic_shared_intrs);
	}
}
static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}
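/*
 * Translate a Linux IRQ trigger type into the GIC's polarity, trigger
 * and dual-edge register settings, then swap the irq_chip so that edge
 * interrupts get the ack-based flow and level interrupts the plain
 * mask-based one.
 */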
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
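/*
 * The GIC routes a shared interrupt to only one VPE at a time, so
 * affinity changes pick the first online CPU in the requested mask,
 * re-route the interrupt there and update the per-CPU dispatch bitmaps
 * accordingly.
 */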
#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t tmp = CPU_MASK_NONE;
	unsigned long flags;
	int i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));

	/* Update the pcpu_masks */
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif
static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
	.ipi_send_single	=	gic_send_ipi,
};
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	while (intr != GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(&pending, intr, 1);
		intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	}
}
static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};
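/*
 * The "all VPEs" variants mask or unmask a local interrupt on every
 * VPE by pointing the GIC's VPE_OTHER register window at each VPE in
 * turn; gic_lock serialises use of that shared window.
 */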
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/*
	 * HACK: These are all really percpu interrupts, but the rest
	 * of the MIPS kernel code does not use the percpu IRQ API for
	 * the CP0 timer and performance counter interrupts.
	 */
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		irq_set_chip_and_handler(virq,
					 &gic_all_vpes_local_irq_controller,
					 handle_percpu_irq);
		break;
	default:
		irq_set_chip_and_handler(virq,
					 &gic_local_irq_controller,
					 handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int vpe)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hw)
{
	if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
		return gic_local_irq_domain_map(d, virq, hw);

	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
				 handle_level_irq);

	return gic_shared_irq_domain_map(d, virq, hw, 0);
}
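/*
 * Allocation in the root domain: device interrupts use the hwirq from
 * their spec directly (as long as it doesn't collide with a reserved
 * IPI vector), while IPI requests carve a contiguous block out of the
 * ipi_resrv bitmap and map one interrupt per CPU in the mask.
 */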
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct gic_irq_spec *spec = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	if (spec->type == GIC_DEVICE) {
		/* verify that it doesn't conflict with an IPI irq */
		if (test_bit(spec->hwirq, ipi_resrv))
			return -EBUSY;

		hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);

		return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						     &gic_level_irq_controller,
						     NULL);
	} else {
		base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
		if (base_hwirq == gic_shared_intrs)
			return -ENOMEM;

		/* check that we have enough space */
		for (i = base_hwirq; i < base_hwirq + nr_irqs; i++) {
			if (!test_bit(i, ipi_resrv))
				return -EBUSY;
		}
		bitmap_clear(ipi_resrv, base_hwirq, nr_irqs);

		/* map the hwirq for each cpu consecutively */
		i = 0;
		for_each_cpu(cpu, spec->ipimask) {
			hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

			ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
							    &gic_level_irq_controller,
							    NULL);
			if (ret)
				goto error;

			irq_set_handler(virq + i, handle_level_irq);

			ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
			if (ret)
				goto error;

			i++;
		}

		/*
		 * tell the parent about the base hwirq we allocated so it can
		 * set its own domain data
		 */
		spec->hwirq = base_hwirq;
	}

	return 0;
error:
	bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
	return ret;
}
void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
}
int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	/* this domain shouldn't be accessed directly */
	return 0;
}
static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.match = gic_irq_domain_match,
};
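/*
 * Device-tree consumers describe a GIC interrupt with three cells:
 * GIC_SHARED or GIC_LOCAL, the interrupt number within that class, and
 * the trigger type. An illustrative consumer fragment (names are an
 * example, not taken from this file):
 *
 *	interrupt-parent = <&gic>;
 *	interrupts = <GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>;
 */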
static int gic_dev_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	struct gic_irq_spec spec = {
		.type = GIC_DEVICE,
		.hwirq = fwspec->param[1],
	};
	int i, ret;
	bool is_shared = fwspec->param[0] == GIC_SHARED;

	if (is_shared) {
		ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
		if (ret)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_hw_number_t hwirq;

		if (is_shared)
			hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i);
		else
			hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i,
						    hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (ret)
			goto error;
	}

	return 0;

error:
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
	return ret;
}
void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	/* no real allocation is done for dev irqs, so no need to free anything */
	return;
}

static void gic_dev_domain_activate(struct irq_domain *domain,
				    struct irq_data *d)
{
	gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
}

static struct irq_domain_ops gic_dev_domain_ops = {
	.xlate = gic_dev_domain_xlate,
	.alloc = gic_dev_domain_alloc,
	.free = gic_dev_domain_free,
	.activate = gic_dev_domain_activate,
};
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}
static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	struct gic_irq_spec spec = {
		.type = GIC_IPI,
		.ipimask = ipimask
	};
	int ret, i;

	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
	if (ret)
		return ret;

	/* the parent should have set spec.hwirq to the base_hwirq it allocated */
	for (i = 0; i < nr_irqs; i++) {
		ret = irq_domain_set_hwirq_and_chip(d, virq + i,
						    GIC_SHARED_TO_HWIRQ(spec.hwirq + i),
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;
	}

	return 0;
error:
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
	return ret;
}
void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
}
int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
	default:
		return 0;
	}
}
static struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};
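/*
 * __gic_init() probes the GIC's size from its config register, hooks
 * the dispatch path up to either an EIC vector or a chained CPU
 * interrupt, and then builds three IRQ domains: the root gic_irq_domain
 * plus two hierarchical domains stacked on top of it, gic_dev_domain
 * for device interrupts and gic_ipi_domain for IPIs.
 */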
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig, cpu;
	unsigned int v[2];

	__gic_base_addr = gic_base_addr;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Set EIC mode for all VPEs */
		for_each_present_cpu(cpu) {
			gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
				  mips_cm_vp_id(cpu));
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL),
				  GIC_VPE_CTL_EIC_MODE_MSK);
		}

		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							   GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");
	gic_irq_domain->name = "mips-gic-irq";

	gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0,
						  GIC_NUM_LOCAL_INTRS +
						  gic_shared_intrs,
						  node, &gic_dev_domain_ops,
						  NULL);
	if (!gic_dev_domain)
		panic("Failed to add GIC DEV domain");
	gic_dev_domain->name = "mips-gic-dev";

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS +
						  gic_shared_intrs,
						  node, &gic_ipi_domain_ops,
						  NULL);
	if (!gic_ipi_domain)
		panic("Failed to add GIC IPI domain");

	gic_ipi_domain->name = "mips-gic-ipi";
	gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}

	gic_basic_init();
}
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}
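/*
 * An illustrative device-tree node matched by this driver (values are
 * an example, not taken from this file; see the mti,gic binding
 * documentation for the authoritative format):
 *
 *	gic: interrupt-controller@1bdc0000 {
 *		compatible = "mti,gic";
 *		reg = <0x1bdc0000 0x20000>;
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *	};
 */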
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);