/*
 * arch/powerpc/platforms/pseries/xics.c
 *
 * Copyright 2000 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/cpu.h>

#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/i8259.h>

#include "xics.h"
#include "plpar_wrappers.h"
#define XICS_IRQ_SPURIOUS	0

/* Want a priority other than 0.  Various HW issues require this. */
#define	DEFAULT_PRIORITY	5
/*
 * Mark IPIs as higher priority so we can take them inside interrupts that
 * aren't marked IRQF_DISABLED
 */
#define IPI_PRIORITY		4
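/*
 * Layout of the per-CPU interrupt presentation registers. The original
 * definition is not part of this excerpt; this is a reconstruction based on
 * the accessors below, which touch xirr.word, xirr.bytes[0] (the CPPR) and
 * qirr.bytes[0] -- treat the exact field set as an assumption.
 */
struct xics_ipl {
	union {
		u32 word;
		u8 bytes[4];
	} xirr_poll;
	union {
		u32 word;
		u8 bytes[4];
	} xirr;
	u32 dummy;
	union {
		u32 word;
		u8 bytes[4];
	} qirr;
};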
static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];

static unsigned int default_server = 0xFF;
static unsigned int default_distrib_server = 0;
static unsigned int interrupt_server_size = 8;

static struct irq_host *xics_host;

/*
 * XICS only has a single IPI, so encode the messages per CPU
 */
struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
/* RTAS service tokens */
static int ibm_get_xive;
static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;
/* Direct HW low level accessors */

static inline unsigned int direct_xirr_info_get(int n_cpu)
{
	return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
}

static inline void direct_xirr_info_set(int n_cpu, int value)
{
	out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
}

static inline void direct_cppr_info(int n_cpu, u8 value)
{
	out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
}

static inline void direct_qirr_info(int n_cpu, u8 value)
{
	out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
}
/* LPAR low level accessors */

static inline unsigned int lpar_xirr_info_get(int n_cpu)
{
	unsigned long lpar_rc;
	unsigned long return_value;

	lpar_rc = plpar_xirr(&return_value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code xirr - rc = %lx\n", lpar_rc);
	return (unsigned int)return_value;
}

static inline void lpar_xirr_info_set(int n_cpu, int value)
{
	unsigned long lpar_rc;
	unsigned long val64 = value & 0xffffffff;

	lpar_rc = plpar_eoi(val64);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
		      val64);
}

static inline void lpar_cppr_info(int n_cpu, u8 value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_cppr(value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code cppr - rc = %lx\n", lpar_rc);
}

static inline void lpar_qirr_info(int n_cpu, u8 value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code qirr - rc = %lx\n", lpar_rc);
}
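/*
 * Note: the plpar_*() wrappers above come from plpar_wrappers.h and map
 * onto the PAPR interrupt hcalls (H_XIRR, H_EOI, H_CPPR and H_IPI
 * respectively), so under an LPAR the hypervisor stands in for the
 * presentation registers that the direct accessors reach via MMIO.
 */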
/* High level handlers and init code */

#ifdef CONFIG_SMP
static int get_irq_server(unsigned int virq)
{
	unsigned int server;
	/* For the moment only implement delivery to all cpus or one cpu */
	cpumask_t cpumask = irq_desc[virq].affinity;
	cpumask_t tmp = CPU_MASK_NONE;

	if (!distribute_irqs)
		return default_server;

	if (cpus_equal(cpumask, CPU_MASK_ALL)) {
		server = default_distrib_server;
	} else {
		cpus_and(tmp, cpu_online_map, cpumask);

		if (cpus_empty(tmp))
			server = default_distrib_server;
		else
			server = get_hard_smp_processor_id(first_cpu(tmp));
	}

	return server;
}
#else
static int get_irq_server(unsigned int virq)
{
	return default_server;
}
#endif
static void xics_unmask_irq(unsigned int virq)
{
	unsigned int irq;
	int call_status;
	unsigned int server;

	pr_debug("xics: unmask virq %d\n", virq);

	irq = (unsigned int)irq_map[virq].hwirq;
	pr_debug(" -> map to hwirq 0x%x\n", irq);
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;

	server = get_irq_server(virq);

	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
				DEFAULT_PRIORITY);
	if (call_status != 0) {
		printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_set_xive "
		       "returned %d\n", irq, call_status);
		printk("set_xive %x, server %x\n", ibm_set_xive, server);
		return;
	}

	/* Now unmask the interrupt (often a no-op) */
	call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
	if (call_status != 0) {
		printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_int_on "
		       "returned %d\n", irq, call_status);
		return;
	}
}
static void xics_mask_real_irq(unsigned int irq)
{
	int call_status;
	unsigned int server;

	if (irq == XICS_IPI)
		return;

	call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
	if (call_status != 0) {
		printk(KERN_ERR "xics_disable_real_irq: irq=%u: "
		       "ibm_int_off returned %d\n", irq, call_status);
		return;
	}

	server = get_irq_server(irq);
	/* Have to set XIVE to 0xff to be able to remove a slot */
	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
	if (call_status != 0) {
		printk(KERN_ERR "xics_disable_irq: irq=%u: ibm_set_xive(0xff)"
		       " returned %d\n", irq, call_status);
		return;
	}
}
static void xics_mask_irq(unsigned int virq)
{
	unsigned int irq;

	pr_debug("xics: mask virq %d\n", virq);

	irq = (unsigned int)irq_map[virq].hwirq;
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;
	xics_mask_real_irq(irq);
}
static unsigned int xics_startup(unsigned int virq)
{
	unsigned int irq;

	/* force a reverse mapping of the interrupt so it gets in the cache */
	irq = (unsigned int)irq_map[virq].hwirq;
	irq_radix_revmap(xics_host, irq);

	/* unmask it */
	xics_unmask_irq(virq);

	return 0;
}
static void xics_eoi_direct(unsigned int virq)
{
	int cpu = smp_processor_id();
	unsigned int irq = (unsigned int)irq_map[virq].hwirq;

	direct_xirr_info_set(cpu, (0xff << 24) | irq);
}
static void xics_eoi_lpar(unsigned int virq)
{
	int cpu = smp_processor_id();
	unsigned int irq = (unsigned int)irq_map[virq].hwirq;

	lpar_xirr_info_set(cpu, (0xff << 24) | irq);
}
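/*
 * In both EOI paths above the value written back to XIRR is
 * (0xff << 24) | irq: the low 24 bits name the source being EOIed and the
 * top byte restores the CPPR to 0xff, i.e. drops the processor back to the
 * "accept anything" priority once the handler has finished.
 */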
static inline unsigned int xics_remap_irq(unsigned int vec)
{
	unsigned int irq;

	vec &= 0x00ffffff;

	if (vec == XICS_IRQ_SPURIOUS)
		return NO_IRQ;
	irq = irq_radix_revmap(xics_host, vec);
	if (likely(irq != NO_IRQ))
		return irq;

	printk(KERN_ERR "Interrupt %u (real) is invalid,"
	       " disabling it.\n", vec);
	xics_mask_real_irq(vec);
	return NO_IRQ;
}
static unsigned int xics_get_irq_direct(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();

	return xics_remap_irq(direct_xirr_info_get(cpu));
}

static unsigned int xics_get_irq_lpar(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();

	return xics_remap_irq(lpar_xirr_info_get(cpu));
}
#ifdef CONFIG_SMP

static irqreturn_t xics_ipi_dispatch(int cpu, struct pt_regs *regs)
{
	WARN_ON(cpu_is_offline(cpu));

	while (xics_ipi_message[cpu].value) {
		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
		}
		if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_RESCHEDULE, regs);
		}
		if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
		}
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
		}
#endif
	}
	return IRQ_HANDLED;
}
static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id,
					  struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	direct_qirr_info(cpu, 0xff);

	return xics_ipi_dispatch(cpu, regs);
}

static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id,
					struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	lpar_qirr_info(cpu, 0xff);

	return xics_ipi_dispatch(cpu, regs);
}
void xics_cause_IPI(int cpu)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_qirr_info(cpu, IPI_PRIORITY);
	else
		direct_qirr_info(cpu, IPI_PRIORITY);
}

#endif /* CONFIG_SMP */
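/*
 * Sketch (not part of this file, using the PPC_MSG_* values handled above):
 * the platform SMP code is expected to pair xics_ipi_message[] with
 * xics_cause_IPI() roughly like
 *
 *	set_bit(PPC_MSG_RESCHEDULE, &xics_ipi_message[target].value);
 *	mb();
 *	xics_cause_IPI(target);
 *
 * with xics_ipi_dispatch() consuming the bits on the receiving CPU.
 */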
static void xics_set_cpu_priority(int cpu, unsigned char cppr)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_cppr_info(cpu, cppr);
	else
		direct_cppr_info(cpu, cppr);
}
static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
{
	unsigned int irq;
	int status;
	int xics_status[2];
	unsigned long newmask;
	cpumask_t tmp = CPU_MASK_NONE;

	irq = (unsigned int)irq_map[virq].hwirq;
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;

	status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);

	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
		       "returns %d\n", irq, status);
		return;
	}

	/* For the moment only implement delivery to all cpus or one cpu */
	if (cpus_equal(cpumask, CPU_MASK_ALL)) {
		newmask = default_distrib_server;
	} else {
		cpus_and(tmp, cpu_online_map, cpumask);
		if (cpus_empty(tmp))
			return;
		newmask = get_hard_smp_processor_id(first_cpu(tmp));
	}

	status = rtas_call(ibm_set_xive, 3, 1, NULL,
			   irq, newmask, xics_status[1]);

	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
		       "returns %d\n", irq, status);
		return;
	}
}
void xics_setup_cpu(void)
{
	int cpu = smp_processor_id();

	xics_set_cpu_priority(cpu, 0xff);

	/*
	 * Put the calling processor into the GIQ.  This is really only
	 * necessary from a secondary thread as the OF start-cpu interface
	 * performs this function for us on primary threads.
	 *
	 * XXX: undo of teardown on kexec needs this too, as may hotplug
	 */
	rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
}
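/*
 * The indicator value passed to rtas_set_indicator_fast() above,
 * (1UL << interrupt_server_size) - 1 - default_distrib_server, is how this
 * file derives the GIQ indicator for the global distribution queue from the
 * interrupt server number size; the same expression is reused in
 * xics_teardown_cpu() and xics_migrate_irqs_away() below when leaving the
 * queue.
 */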
static struct irq_chip xics_pic_direct = {
	.typename = " XICS ",
	.startup = xics_startup,
	.mask = xics_mask_irq,
	.unmask = xics_unmask_irq,
	.eoi = xics_eoi_direct,
	.set_affinity = xics_set_affinity
};

static struct irq_chip xics_pic_lpar = {
	.typename = " XICS ",
	.startup = xics_startup,
	.mask = xics_mask_irq,
	.unmask = xics_unmask_irq,
	.eoi = xics_eoi_lpar,
	.set_affinity = xics_set_affinity
};
static int xics_host_match(struct irq_host *h, struct device_node *node)
{
	/* IBM machines have interrupt parents of various funky types for things
	 * like vdevices, events, etc... The trick we use here is to match
	 * everything here except the legacy 8259 which is compatible "chrp,iic"
	 */
	return !device_is_compatible(node, "chrp,iic");
}
static int xics_host_map_direct(struct irq_host *h, unsigned int virq,
				irq_hw_number_t hw)
{
	pr_debug("xics: map_direct virq %d, hwirq 0x%lx\n", virq, hw);

	get_irq_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, &xics_pic_direct, handle_fasteoi_irq);
	return 0;
}
static int xics_host_map_lpar(struct irq_host *h, unsigned int virq,
			      irq_hw_number_t hw)
{
	pr_debug("xics: map_lpar virq %d, hwirq 0x%lx\n", virq, hw);

	get_irq_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, &xics_pic_lpar, handle_fasteoi_irq);
	return 0;
}
static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
			   u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	/* Current xics implementation translates everything
	 * to level. It is not technically right for MSIs but this
	 * is irrelevant at this point. We might get smarter in the future
	 */
	*out_hwirq = intspec[0];
	*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}
static struct irq_host_ops xics_host_direct_ops = {
	.match = xics_host_match,
	.map = xics_host_map_direct,
	.xlate = xics_host_xlate,
};

static struct irq_host_ops xics_host_lpar_ops = {
	.match = xics_host_match,
	.map = xics_host_map_lpar,
	.xlate = xics_host_xlate,
};
static void __init xics_init_host(void)
{
	struct irq_host_ops *ops;

	if (firmware_has_feature(FW_FEATURE_LPAR))
		ops = &xics_host_lpar_ops;
	else
		ops = &xics_host_direct_ops;
	xics_host = irq_alloc_host(IRQ_HOST_MAP_TREE, 0, ops,
				   XICS_IRQ_SPURIOUS);
	BUG_ON(xics_host == NULL);
	irq_set_default_host(xics_host);
}
static void __init xics_map_one_cpu(int hw_id, unsigned long addr,
				    unsigned long size)
{
#ifdef CONFIG_SMP
	int i;

	/* This may look gross but it's good enough for now, we don't quite
	 * have a hard -> linux processor id matching.
	 */
	for_each_possible_cpu(i) {
		if (hw_id == get_hard_smp_processor_id(i)) {
			xics_per_cpu[i] = ioremap(addr, size);
			return;
		}
	}
#else
	xics_per_cpu[0] = ioremap(addr, size);
#endif /* CONFIG_SMP */
}
static void __init xics_init_one_node(struct device_node *np,
				      unsigned int *indx)
{
	unsigned int ilen;
	const u32 *ireg;

	/* This code makes the theoretically broken assumption that the
	 * interrupt server numbers are the same as the hard CPU numbers.
	 * This happens to be the case so far but we are playing with fire...
	 * should be fixed one of these days. -BenH.
	 */
	ireg = get_property(np, "ibm,interrupt-server-ranges", NULL);

	/* Does that ever happen? we'll know soon enough... but even good'old
	 * f80 does have that property ..
	 */
	WARN_ON(ireg == NULL);

	if (ireg)
		/*
		 * set node starting index for this node
		 */
		*indx = *ireg;

	ireg = get_property(np, "reg", &ilen);
	if (!ireg)
		panic("xics_init_IRQ: can't find interrupt reg property");

	while (ilen >= (4 * sizeof(u32))) {
		unsigned long addr, size;

		/* XXX Use proper OF parsing code here !!! */
		addr = (unsigned long)*ireg++ << 32;
		ilen -= sizeof(u32);
		addr |= *ireg++;
		ilen -= sizeof(u32);
		size = (unsigned long)*ireg++ << 32;
		ilen -= sizeof(u32);
		size |= *ireg++;
		ilen -= sizeof(u32);
		xics_map_one_cpu(*indx, addr, size);
		(*indx)++;
	}
}
static void __init xics_setup_8259_cascade(void)
{
	struct device_node *np, *old, *found = NULL;
	int cascade, naddr;
	const u32 *addrp;
	unsigned long intack = 0;

	for_each_node_by_type(np, "interrupt-controller")
		if (device_is_compatible(np, "chrp,iic")) {
			found = np;
			break;
		}
	if (found == NULL) {
		printk(KERN_DEBUG "xics: no ISA interrupt controller\n");
		return;
	}
	cascade = irq_of_parse_and_map(found, 0);
	if (cascade == NO_IRQ) {
		printk(KERN_ERR "xics: failed to map cascade interrupt");
		return;
	}
	pr_debug("xics: cascade mapped to irq %d\n", cascade);

	for (old = of_node_get(found); old != NULL; old = np) {
		np = of_get_parent(old);
		of_node_put(old);
		if (np == NULL)
			break;
		if (strcmp(np->name, "pci") != 0)
			continue;
		addrp = get_property(np, "8259-interrupt-acknowledge", NULL);
		if (addrp == NULL)
			continue;
		naddr = prom_n_addr_cells(np);
		intack = addrp[naddr-1];
		if (naddr > 1)
			intack |= ((unsigned long)addrp[naddr-2]) << 32;
	}
	if (intack)
		printk(KERN_DEBUG "xics: PCI 8259 intack at 0x%016lx\n",
		       intack);
	i8259_init(found, intack);
	of_node_put(found);
	set_irq_chained_handler(cascade, pseries_8259_cascade);
}
void __init xics_init_IRQ(void)
{
	int i;
	struct device_node *np;
	unsigned int ilen, indx = 0;
	const u32 *ireg;
	int found = 0;

	ppc64_boot_msg(0x20, "XICS Init");

	ibm_get_xive = rtas_token("ibm,get-xive");
	ibm_set_xive = rtas_token("ibm,set-xive");
	ibm_int_on = rtas_token("ibm,int-on");
	ibm_int_off = rtas_token("ibm,int-off");

	for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") {
		found = 1;
		if (firmware_has_feature(FW_FEATURE_LPAR))
			break;
		xics_init_one_node(np, &indx);
	}
	if (found == 0)
		return;

	xics_init_host();

	/* Find the server numbers for the boot cpu. */
	for (np = of_find_node_by_type(NULL, "cpu");
	     np;
	     np = of_find_node_by_type(np, "cpu")) {
		ireg = get_property(np, "reg", &ilen);
		if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) {
			ireg = get_property(np,
					"ibm,ppc-interrupt-gserver#s", &ilen);
			i = ilen / sizeof(int);
			if (ireg && i > 0) {
				default_server = ireg[0];
				/* take last element */
				default_distrib_server = ireg[i-1];
			}
			ireg = get_property(np,
					"ibm,interrupt-server#-size", NULL);
			if (ireg)
				interrupt_server_size = *ireg;
			break;
		}
	}

	if (firmware_has_feature(FW_FEATURE_LPAR))
		ppc_md.get_irq = xics_get_irq_lpar;
	else
		ppc_md.get_irq = xics_get_irq_direct;

	xics_setup_cpu();

	xics_setup_8259_cascade();

	ppc64_boot_msg(0x21, "XICS Done");
}
#ifdef CONFIG_SMP
void xics_request_IPIs(void)
{
	unsigned int ipi;

	ipi = irq_create_mapping(xics_host, XICS_IPI);
	BUG_ON(ipi == NO_IRQ);

	/*
	 * IPIs are marked IRQF_DISABLED as they must run with irqs
	 * disabled
	 */
	set_irq_handler(ipi, handle_percpu_irq);
	if (firmware_has_feature(FW_FEATURE_LPAR))
		request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED,
			    "IPI", NULL);
	else
		request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED,
			    "IPI", NULL);
}
#endif /* CONFIG_SMP */
void xics_teardown_cpu(int secondary)
{
	int cpu = smp_processor_id();
	unsigned int ipi;
	struct irq_desc *desc;

	xics_set_cpu_priority(cpu, 0);

	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_qirr_info(cpu, 0xff);
	else
		direct_qirr_info(cpu, 0xff);

	/*
	 * we need to EOI the IPI if we got here from kexec down IPI
	 *
	 * probably need to check all the other interrupts too
	 * should we be flagging idle loop instead?
	 * or creating some task to be scheduled?
	 */

	ipi = irq_find_mapping(xics_host, XICS_IPI);
	if (ipi == XICS_IRQ_SPURIOUS)
		return;
	desc = get_irq_desc(ipi);
	if (desc->chip && desc->chip->eoi)
		desc->chip->eoi(ipi);

	/*
	 * Some machines need to have at least one cpu in the GIQ,
	 * so leave the master cpu in the group.
	 */
	if (secondary)
		rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
			(1UL << interrupt_server_size) - 1 -
			default_distrib_server, 0);
}
#ifdef CONFIG_HOTPLUG_CPU

/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
	int status;
	unsigned int irq, virq, cpu = smp_processor_id();

	/* Reject any interrupt that was queued to us... */
	xics_set_cpu_priority(cpu, 0);

	/* remove ourselves from the global interrupt queue */
	status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
	WARN_ON(status < 0);

	/* Allow IPIs again... */
	xics_set_cpu_priority(cpu, DEFAULT_PRIORITY);

	for (virq = 0; virq < NR_IRQS; virq++) {
		struct irq_desc *desc;
		int xics_status[2];
		unsigned long flags;

		/* We can't set affinity on ISA interrupts */
		if (virq < NUM_ISA_INTERRUPTS)
			continue;
		if (irq_map[virq].host != xics_host)
			continue;
		irq = (unsigned int)irq_map[virq].hwirq;
		/* We need to get IPIs still. */
		if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
			continue;
		desc = get_irq_desc(virq);

		/* We only need to migrate enabled IRQS */
		if (desc == NULL || desc->chip == NULL
		    || desc->action == NULL
		    || desc->chip->set_affinity == NULL)
			continue;

		spin_lock_irqsave(&desc->lock, flags);

		status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
		if (status) {
			printk(KERN_ERR "migrate_irqs_away: irq=%u "
					"ibm,get-xive returns %d\n",
					virq, status);
			goto unlock;
		}

		/*
		 * We only support delivery to all cpus or to one cpu.
		 * The irq has to be migrated only in the single cpu
		 * case.
		 */
		if (xics_status[0] != get_hard_smp_processor_id(cpu))
			goto unlock;

		printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
		       virq, cpu);

		/* Reset affinity to all cpus */
		desc->chip->set_affinity(virq, CPU_MASK_ALL);
		irq_desc[irq].affinity = CPU_MASK_ALL;
unlock:
		spin_unlock_irqrestore(&desc->lock, flags);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */