/* -*- mode: c; c-basic-offset: 8 -*- */

/* Copyright (C) 1999,2001
 *
 * Author: J.E.J.Bottomley@HansenPartnership.com
 *
 * linux/arch/i386/kernel/voyager_smp.c
 *
 * This file provides all the same external entries as smp.c but uses
 * the voyager hal to provide the functionality
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/completion.h>
#include <asm/desc.h>
#include <asm/voyager.h>
#include <asm/vic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/arch_hooks.h>
/* TLB state -- visible externally, indexed physically */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };

/* CPU IRQ affinity -- set to all ones initially */
static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned =
	{[0 ... NR_CPUS-1] = ~0UL };

/* per CPU data structure (for /proc/cpuinfo et al), visible externally
 * indexed physically */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* physical ID of the CPU used to boot the system */
unsigned char boot_cpu_id;
/* The memory line addresses for the Quad CPIs */
struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned;

/* The masks for the Extended VIC processors, filled in by cat_init */
__u32 voyager_extended_vic_processors = 0;

/* Masks for the extended Quad processors which cannot be VIC booted */
__u32 voyager_allowed_boot_processors = 0;

/* The mask for the Quad Processors (both extended and non-extended) */
__u32 voyager_quad_processors = 0;

/* Total count of live CPUs, used in process.c to display
 * the CPU information and in irq.c for the per CPU irq
 * activity count.  Finally exported by i386_ksyms.c */
static int voyager_extended_cpus = 1;

/* Have we found an SMP box - used by time.c to do the profiling
   interrupt for timeslicing; do not set to 1 until the per CPU timer
   interrupt is active */
int smp_found_config = 0;

/* Used for the invalidate map that's also checked in the spinlock */
static volatile unsigned long smp_invalidate_needed;

/* Bitmask of currently online CPUs - used by setup.c for
   /proc/cpuinfo, visible externally but still physical */
cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

/* Bitmask of CPUs present in the system - exported by i386_syms.c, used
 * by scheduler but indexed physically */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
/* The internal functions */
static void send_CPI(__u32 cpuset, __u8 cpi);
static void ack_CPI(__u8 cpi);
static int ack_QIC_CPI(__u8 cpi);
static void ack_special_QIC_CPI(__u8 cpi);
static void ack_VIC_CPI(__u8 cpi);
static void send_CPI_allbutself(__u8 cpi);
static void mask_vic_irq(unsigned int irq);
static void unmask_vic_irq(unsigned int irq);
static unsigned int startup_vic_irq(unsigned int irq);
static void enable_local_vic_irq(unsigned int irq);
static void disable_local_vic_irq(unsigned int irq);
static void before_handle_vic_irq(unsigned int irq);
static void after_handle_vic_irq(unsigned int irq);
static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
static void ack_vic_irq(unsigned int irq);
static void vic_enable_cpi(void);
static void do_boot_cpu(__u8 cpuid);
static void do_quad_bootstrap(void);

int hard_smp_processor_id(void);
int safe_smp_processor_id(void);
/* Inline functions */
static inline void send_one_QIC_CPI(__u8 cpu, __u8 cpi)
{
	/* the sending CPU's id is encoded in the top 16 bits of the
	 * mailbox write so the receiver can tell who raised the CPI */
	voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi =
	    (smp_processor_id() << 16) + cpi;
}

static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpuset & (1 << cpu)) {
#ifdef VOYAGER_DEBUG
			if (!cpu_isset(cpu, cpu_online_map))
				VDEBUG(("CPU%d sending cpi %d to CPU%d not in "
					"cpu_online_map\n",
					hard_smp_processor_id(), cpi, cpu));
#endif
			send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
		}
	}
}
static inline void wrapper_smp_local_timer_interrupt(void)
{
	irq_enter();
	smp_local_timer_interrupt();
	irq_exit();
}

static inline void send_one_CPI(__u8 cpu, __u8 cpi)
{
	if (voyager_quad_processors & (1 << cpu))
		send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
	else
		send_CPI(1 << cpu, cpi);
}

static inline void send_CPI_allbutself(__u8 cpi)
{
	__u8 cpu = smp_processor_id();
	__u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
	send_CPI(mask, cpi);
}

static inline int is_cpu_quad(void)
{
	__u8 cpumask = inb(VIC_PROC_WHO_AM_I);
	return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER);
}

static inline int is_cpu_extended(void)
{
	__u8 cpu = hard_smp_processor_id();

	return (voyager_extended_vic_processors & (1 << cpu));
}

static inline int is_cpu_vic_boot(void)
{
	__u8 cpu = hard_smp_processor_id();

	return (voyager_extended_vic_processors
		& voyager_allowed_boot_processors & (1 << cpu));
}

static inline void ack_CPI(__u8 cpi)
{
	switch (cpi) {
	case VIC_CPU_BOOT_CPI:
		if (is_cpu_quad() && !is_cpu_vic_boot())
			ack_QIC_CPI(cpi);
		else
			ack_VIC_CPI(cpi);
		break;
	case VIC_SYS_INT:
	case VIC_CMN_INT:
		/* These are slightly strange.  Even on the Quad card,
		 * They are vectored as VIC CPIs */
		if (is_cpu_quad())
			ack_special_QIC_CPI(cpi);
		else
			ack_VIC_CPI(cpi);
		break;
	default:
		printk("VOYAGER ERROR: CPI%d is in common CPI code\n", cpi);
		break;
	}
}
/* local variables */

/* The VIC IRQ descriptors -- these look almost identical to the
 * 8259 IRQs except that masks and things must be kept per processor
 */
static struct irq_chip vic_chip = {
	.name = "VIC",
	.startup = startup_vic_irq,
	.mask = mask_vic_irq,
	.unmask = unmask_vic_irq,
	.set_affinity = set_vic_irq_affinity,
};

/* used to count up as CPUs are brought on line (starts at 0) */
static int cpucount = 0;

/* steal a page from the bottom of memory for the trampoline and
 * squirrel its address away here.  This will be in kernel virtual
 * space */
static __u32 trampoline_base;

/* The per cpu profile stuff - used in smp_local_timer_interrupt */
static DEFINE_PER_CPU(int, prof_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_counter) = 1;

/* the map used to check if a CPU has booted */
static __u32 cpu_booted_map;

/* the synchronize flag used to hold all secondary CPUs spinning in
 * a tight loop until the boot sequence is ready for them */
static cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* This is for the new dynamic CPU boot code */
cpumask_t cpu_callin_map = CPU_MASK_NONE;
cpumask_t cpu_callout_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_callout_map);
cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);
/* The per processor IRQ masks (these are usually kept in sync) */
static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;

/* the list of IRQs to be enabled by the VIC_ENABLE_IRQ_CPI */
static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };

/* Lock for enable/disable of VIC interrupts */
static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);

/* The boot processor is correctly set up in PC mode when it
 * comes up, but the secondaries need their master/slave 8259
 * pairs initializing correctly */

/* Interrupt counters (per cpu) and total - used to try to
 * even up the interrupt handling routines */
static long vic_intr_total = 0;
static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 };
static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 };

/* Since we can only use CPI0, we fake all the other CPIs */
static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned;
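
/* Illustrative sketch (not compiled): how the CPI0 mailbox faking
 * works end to end.  The sender sets a bit for the logical CPI in
 * the target's mailbox and then raises the only real VIC CPI
 * (VIC_CPI_LEVEL0); the receiver's smp_vic_cpi_interrupt() below
 * tests and clears each mailbox bit to dispatch the faked CPIs.
 * example_fake_cpi() is a hypothetical helper; the constants are the
 * real ones used later in this file. */
#if 0
static void example_fake_cpi(__u8 target_cpu)
{
	/* sender side: mark the reschedule CPI pending ... */
	set_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[target_cpu]);
	/* ... and kick the one hardware CPI the VIC gives us */
	outb(1 << target_cpu, VIC_CPI_Registers[VIC_CPI_LEVEL0]);

	/* receiver side (see smp_vic_cpi_interrupt() below): */
	if (test_and_clear_bit(VIC_RESCHEDULE_CPI,
			       &vic_cpi_mailbox[smp_processor_id()]))
		smp_reschedule_interrupt();
}
#endif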
/* debugging routine to read the isr of the cpu's pic */
static inline __u16 vic_read_isr(void)
{
	__u16 isr;

	/* OCW3: read the in-service register of the slave, then the
	 * master 8259 */
	outb(0x0b, 0xa0);
	isr = inb(0xa0) << 8;
	outb(0x0b, 0x20);
	isr |= inb(0x20);

	return isr;
}

static __init void qic_setup(void)
{
	if (!is_cpu_quad()) {
		/* not a quad, no setup */
		return;
	}
	outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
	outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);

	if (is_cpu_extended()) {
		/* the QIC duplicate of the VIC base register */
		outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER);
		outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER);

		/* FIXME: should set up the QIC timer and memory parity
		 * error vectors here */
	}
}
static __init void vic_setup_pic(void)
{
	outb(1, VIC_REDIRECT_REGISTER_1);
	/* clear the claim registers for dynamic routing */
	outb(0, VIC_CLAIM_REGISTER_0);
	outb(0, VIC_CLAIM_REGISTER_1);

	outb(0, VIC_PRIORITY_REGISTER);

	/* Set the Primary and Secondary Microchannel vector
	 * bases to be the same as the ordinary interrupts
	 *
	 * FIXME: This would be more efficient using separate
	 * vectors. */
	outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
	outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);

	/* Now initialise the master PIC belonging to this CPU by
	 * sending the four ICWs */

	/* ICW1: level triggered, ICW4 needed */
	outb(0x19, 0x20);

	/* ICW2: vector base */
	outb(FIRST_EXTERNAL_VECTOR, 0x21);

	/* ICW3: slave at line 2 */
	outb(0x04, 0x21);

	/* ICW4: 8086 mode */
	outb(0x01, 0x21);

	/* now the same for the slave PIC */

	/* ICW1: level trigger, ICW4 needed */
	outb(0x19, 0xA0);

	/* ICW2: slave vector base */
	outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1);

	/* ICW3: slave ID */
	outb(0x02, 0xA1);

	/* ICW4: 8086 mode */
	outb(0x01, 0xA1);
}
static void do_quad_bootstrap(void)
{
	if (is_cpu_quad() && is_cpu_vic_boot()) {
		int i;
		unsigned long flags;
		__u8 cpuid = hard_smp_processor_id();

		local_irq_save(flags);

		for (i = 0; i < 4; i++) {
			/* FIXME: this would be >>3 &0x7 on the 32 way */
			if (((cpuid >> 2) & 0x03) == i)
				/* don't lower our own mask! */
				continue;

			/* masquerade as local Quad CPU */
			outb(QIC_CPUID_ENABLE | i, QIC_PROCESSOR_ID);
			/* enable the startup CPI */
			outb(QIC_BOOT_CPI_MASK, QIC_MASK_REGISTER1);
			/* restore cpu id */
			outb(0, QIC_PROCESSOR_ID);
		}
		local_irq_restore(flags);
	}
}
/* Set up all the basic stuff: read the SMP config and make all the
 * SMP information reflect only the boot cpu.  All others will be
 * brought on-line later. */
void __init find_smp_config(void)
{
	int i;

	boot_cpu_id = hard_smp_processor_id();

	printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);

	/* initialize the CPU structures (moved from smp_boot_cpus) */
	for (i = 0; i < NR_CPUS; i++) {
		cpu_irq_affinity[i] = ~0;
	}
	cpu_online_map = cpumask_of_cpu(boot_cpu_id);

	/* The boot CPU must be extended */
	voyager_extended_vic_processors = 1 << boot_cpu_id;
	/* initially, all of the first 8 CPUs can boot */
	voyager_allowed_boot_processors = 0xff;
	/* set up everything for just this CPU, we can alter
	 * this as we start the other CPUs later */
	/* now get the CPU disposition from the extended CMOS */
	cpus_addr(phys_cpu_present_map)[0] =
	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
	cpus_addr(phys_cpu_present_map)[0] |=
	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
	cpus_addr(phys_cpu_present_map)[0] |=
	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
				       2) << 16;
	cpus_addr(phys_cpu_present_map)[0] |=
	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
				       3) << 24;
	cpu_possible_map = phys_cpu_present_map;
	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
	       cpus_addr(phys_cpu_present_map)[0]);
	/* Here we set up the VIC to enable SMP */
	/* enable the CPIs by writing the base vector to their register */
	outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
	outb(1, VIC_REDIRECT_REGISTER_1);
	/* set the claim registers for static routing --- Boot CPU gets
	 * all interrupts until all other CPUs started */
	outb(0xff, VIC_CLAIM_REGISTER_0);
	outb(0xff, VIC_CLAIM_REGISTER_1);
	/* Set the Primary and Secondary Microchannel vector
	 * bases to be the same as the ordinary interrupts
	 *
	 * FIXME: This would be more efficient using separate
	 * vectors. */
	outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
	outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);

	/* Finally tell the firmware that we're driving */
	outb(inb(VOYAGER_SUS_IN_CONTROL_PORT) | VOYAGER_IN_CONTROL_FLAG,
	     VOYAGER_SUS_IN_CONTROL_PORT);

	current_thread_info()->cpu = boot_cpu_id;
	x86_write_percpu(cpu_number, boot_cpu_id);
}
/*
 * The bootstrap kernel entry code has set these up.  Save them
 * for a given CPU, id is physical */
void __init smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;

	identify_secondary_cpu(c);
}

/* set up the trampoline and return the physical address of the code */
static __u32 __init setup_trampoline(void)
{
	/* these two are global symbols in trampoline.S */
	extern const __u8 trampoline_end[];
	extern const __u8 trampoline_data[];

	memcpy((__u8 *) trampoline_base, trampoline_data,
	       trampoline_end - trampoline_data);
	return virt_to_phys((__u8 *) trampoline_base);
}
/* Routine initially called when a non-boot CPU is brought online */
static void __init start_secondary(void *unused)
{
	__u8 cpuid = hard_smp_processor_id();
	/* external functions not defined in the headers */
	extern void calibrate_delay(void);

	cpu_init();

	/* OK, we're in the routine */
	ack_CPI(VIC_CPU_BOOT_CPI);

	/* setup the 8259 master slave pair belonging to this CPU ---
	 * we won't actually receive any until the boot CPU
	 * relinquishes its static routing mask */
	vic_setup_pic();

	qic_setup();

	if (is_cpu_quad() && !is_cpu_vic_boot()) {
		/* clear the boot CPI */
		__u8 dummy;

		dummy =
		    voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
		printk("read dummy %d\n", dummy);
	}

	/* lower the mask to receive CPIs */
	vic_enable_cpi();

	VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid));

	/* enable interrupts */
	local_irq_enable();

	/* get our bogomips */
	calibrate_delay();

	/* save our processor parameters */
	smp_store_cpu_info(cpuid);

	/* if we're a quad, we may need to bootstrap other CPUs */
	do_quad_bootstrap();

	/* FIXME: this is rather a poor hack to prevent the CPU
	 * activating softirqs while it's supposed to be waiting for
	 * permission to proceed.  Without this, the new per CPU stuff
	 * in the softirqs will fail */
	local_irq_disable();
	cpu_set(cpuid, cpu_callin_map);

	/* signal that we're done */
	cpu_booted_map = 1;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rep_nop();
	local_irq_enable();

	local_flush_tlb();

	cpu_set(cpuid, cpu_online_map);
	wmb();
	cpu_idle();
}
/* Routine to kick start the given CPU and wait for it to report ready
 * (or timeout in startup).  When this routine returns, the requested
 * CPU is either fully running and configured or known to be dead.
 *
 * We call this routine sequentially 1 CPU at a time, so no need for
 * locking */
static void __init do_boot_cpu(__u8 cpu)
{
	struct task_struct *idle;
	int timeout;
	unsigned long flags;
	int quad_boot = (1 << cpu) & voyager_quad_processors
	    & ~(voyager_extended_vic_processors
		& voyager_allowed_boot_processors);

	/* This is an area in head.S which was used to set up the
	 * initial kernel stack.  We need to alter this to give the
	 * booting CPU a new stack (taken from its idle process) */
	extern struct {
		__u8 *esp;
		unsigned short ss;
	} stack_start;
	/* This is the format of the CPI IDT gate (in real mode) which
	 * we're hijacking to boot the CPU */
	union IDTFormat {
		struct seg {
			__u16 Offset;
			__u16 Segment;
		} idt;
		__u32 val;
	} hijack_source;

	__u32 *hijack_vector;
	__u32 start_phys_address = setup_trampoline();

	/* There's a clever trick to this: The linux trampoline is
	 * compiled to begin at absolute location zero, so make the
	 * address zero but have the data segment selector compensate
	 * for the actual address */
	hijack_source.idt.Offset = start_phys_address & 0x000F;
	hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
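	/* Worked example of the segment arithmetic above (an
	 * annotation, not code from the original): if
	 * setup_trampoline() returned 0x9F000, then Offset = 0x0 and
	 * Segment = 0x9F00, so the real-mode far jump lands at
	 * 0x9F00:0x0000 = 0x9F00 * 16 + 0x0 = 0x9F000 -- exactly the
	 * trampoline copy -- while addresses inside the trampoline
	 * still resolve as if it were linked at zero. */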
	cpucount++;
	alternatives_smp_switch(1);

	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU%d", cpu);
	idle->thread.eip = (unsigned long)start_secondary;
	/* init_tasks (in sched.c) is indexed logically */
	stack_start.esp = (void *)idle->thread.esp;

	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	irq_ctx_init(cpu);

	/* Note: Don't modify initial ss override */
	VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
		(unsigned long)hijack_source.val, hijack_source.idt.Segment,
		hijack_source.idt.Offset, stack_start.esp));

	/* init lowmem identity mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
	flush_tlb_all();

	if (quad_boot) {
		printk("CPU %d: non extended Quad boot\n", cpu);
		hijack_vector =
		    phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE) * 4);
		*hijack_vector = hijack_source.val;
	} else {
		printk("CPU%d: extended VIC boot\n", cpu);
		hijack_vector =
		    phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE) * 4);
		*hijack_vector = hijack_source.val;
		/* VIC errata, may also receive interrupt at this address */
		hijack_vector =
		    phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI +
				  VIC_DEFAULT_CPI_BASE) * 4);
		*hijack_vector = hijack_source.val;
	}
	/* All non-boot CPUs start with interrupts fully masked.  Need
	 * to lower the mask of the CPI we're about to send.  We do
	 * this in the VIC by masquerading as the processor we're
	 * about to boot and lowering its interrupt mask */
	local_irq_save(flags);
	if (quad_boot) {
		send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
	} else {
		outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
		/* here we're altering registers belonging to `cpu' */

		outb(VIC_BOOT_INTERRUPT_MASK, 0x21);
		/* now go back to our original identity */
		outb(boot_cpu_id, VIC_PROCESSOR_ID);

		/* and boot the CPU */
		send_CPI((1 << cpu), VIC_CPU_BOOT_CPI);
	}
	cpu_booted_map = 0;
	local_irq_restore(flags);

	/* now wait for it to become ready (or timeout) */
	for (timeout = 0; timeout < 50000; timeout++) {
		if (cpu_booted_map)
			break;
		udelay(100);
	}
	/* reset the page table */
	zap_low_mappings();

	if (cpu_booted_map) {
		VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
			cpu, smp_processor_id()));

		printk("CPU%d: ", cpu);
		print_cpu_info(&cpu_data(cpu));
		wmb();
		cpu_set(cpu, cpu_callout_map);
		cpu_set(cpu, cpu_present_map);
	} else {
		printk("CPU%d FAILED TO BOOT: ", cpu);
		if (*((volatile unsigned char *)phys_to_virt(start_phys_address))
		    == 0xA5)
			printk("Stuck.\n");
		else
			printk("Not responding.\n");

		cpucount--;
	}
}
void __init smp_boot_cpus(void)
{
	int i;

	/* CAT BUS initialisation must be done after the memory */
	/* FIXME: The L4 has a catbus too, it just needs to be
	 * accessed in a totally different way */
	if (voyager_level == 5) {
		voyager_cat_init();

		/* now that the cat has probed the Voyager System Bus, sanity
		 * check the cpu map */
		if (((voyager_quad_processors | voyager_extended_vic_processors)
		     & cpus_addr(phys_cpu_present_map)[0]) !=
		    cpus_addr(phys_cpu_present_map)[0]) {
			/* should panic */
			printk("\n\n***WARNING*** "
			       "Sanity check of CPU present map FAILED\n");
		}
	} else if (voyager_level == 4)
		voyager_extended_vic_processors =
		    cpus_addr(phys_cpu_present_map)[0];

	/* this sets up the idle task to run on the current cpu */
	voyager_extended_cpus = 1;
	/* Remove the global_irq_holder setting, it triggers a BUG() on
	 * schedule at the moment */
	//global_irq_holder = boot_cpu_id;

	/* FIXME: Need to do something about this but currently only works
	 * on CPUs with a tsc which none of mine have.
	 smp_tune_scheduling();
	 */
	smp_store_cpu_info(boot_cpu_id);
	printk("CPU%d: ", boot_cpu_id);
	print_cpu_info(&cpu_data(boot_cpu_id));

	if (is_cpu_quad()) {
		/* booting on a Quad CPU */
		printk("VOYAGER SMP: Boot CPU is Quad\n");
		qic_setup();
		do_quad_bootstrap();
	}

	/* enable our own CPIs */
	vic_enable_cpi();

	cpu_set(boot_cpu_id, cpu_online_map);
	cpu_set(boot_cpu_id, cpu_callout_map);

	/* loop over all the extended VIC CPUs and boot them.  The
	 * Quad CPUs must be bootstrapped by their extended VIC cpu */
	for (i = 0; i < NR_CPUS; i++) {
		if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
			continue;
		do_boot_cpu(i);
		/* This udelay seems to be needed for the Quad boots
		 * don't remove unless you know what you're doing */
		udelay(1000);
	}
	/* we could compute the total bogomips here, but why bother?,
	 * Code added from smpboot.c */
	{
		unsigned long bogosum = 0;

		for (i = 0; i < NR_CPUS; i++)
			if (cpu_isset(i, cpu_online_map))
				bogosum += cpu_data(i).loops_per_jiffy;
		printk(KERN_INFO "Total of %d processors activated "
		       "(%lu.%02lu BogoMIPS).\n",
		       cpucount + 1, bogosum / (500000 / HZ),
		       (bogosum / (5000 / HZ)) % 100);
	}
	voyager_extended_cpus = hweight32(voyager_extended_vic_processors);
	printk("VOYAGER: Extended (interrupt handling CPUs): "
	       "%d, non-extended: %d\n", voyager_extended_cpus,
	       num_booting_cpus() - voyager_extended_cpus);
	/* that's it, switch to symmetric mode */
	outb(0, VIC_PRIORITY_REGISTER);
	outb(0, VIC_CLAIM_REGISTER_0);
	outb(0, VIC_CLAIM_REGISTER_1);

	VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus()));
}
/* Reload the secondary CPUs task structure (this function does not
 * return) */
void __init initialize_secondary(void)
{
#if 0
	// AC kernels only
	set_current(hard_get_current());
#endif

	/*
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the eip.
	 */

	asm volatile ("movl %0,%%esp\n\t"
		      "jmp *%1"::"r" (current->thread.esp),
		      "r"(current->thread.eip));
}

/* handle a Voyager SYS_INT -- If we don't, the base board will
 * panic the system.
 *
 * System interrupts occur because some problem was detected on the
 * various busses.  To find out what you have to probe all the
 * hardware via the CAT bus.  FIXME: At the moment we do nothing. */
fastcall void smp_vic_sys_interrupt(struct pt_regs *regs)
{
	ack_CPI(VIC_SYS_INT);
	printk("Voyager SYSTEM INTERRUPT\n");
}
/* Handle a voyager CMN_INT; These interrupts occur either because of
 * a system status change or because a single bit memory error
 * occurred.  FIXME: At the moment, ignore all this. */
fastcall void smp_vic_cmn_interrupt(struct pt_regs *regs)
{
	static __u8 in_cmn_int = 0;
	static DEFINE_SPINLOCK(cmn_int_lock);

	/* common ints are broadcast, so make sure we only do this once */
	_raw_spin_lock(&cmn_int_lock);
	if (in_cmn_int)
		goto unlock_end;

	in_cmn_int++;
	_raw_spin_unlock(&cmn_int_lock);

	VDEBUG(("Voyager COMMON INTERRUPT\n"));

	if (voyager_level == 5)
		voyager_cat_do_common_interrupt();

	_raw_spin_lock(&cmn_int_lock);
	in_cmn_int = 0;
      unlock_end:
	_raw_spin_unlock(&cmn_int_lock);
	ack_CPI(VIC_CMN_INT);
}
/*
 * Reschedule call back.  Nothing to do, all the work is done
 * automatically when we return from the interrupt.  */
static void smp_reschedule_interrupt(void)
{
	/* do nothing */
}

static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL	0xffffffff

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
static inline void leave_mm(unsigned long cpu)
{
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
/*
 * Invalidate call-back
 */
static void smp_invalidate_interrupt(void)
{
	__u8 cpu = smp_processor_id();

	if (!test_bit(cpu, &smp_invalidate_needed))
		return;
	/* This will flood messages.  Don't uncomment unless you see
	 * Problems with cross cpu invalidation
	 VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
	 smp_processor_id()));
	 */

	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	smp_mb__before_clear_bit();
	clear_bit(cpu, &smp_invalidate_needed);
	smp_mb__after_clear_bit();
}
/* All the new flush operations for 2.4 */

/* This routine is called with a physical cpu mask */
static void
voyager_flush_tlb_others(unsigned long cpumask, struct mm_struct *mm,
			 unsigned long va)
{
	int stuck = 50000;

	if (!cpumask)
		BUG();
	if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask)
		BUG();
	if (cpumask & (1 << smp_processor_id()))
		BUG();
	if (!mm)
		BUG();

	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
	atomic_set_mask(cpumask, &smp_invalidate_needed);
	/*
	 * We have to send the CPI only to
	 * CPUs affected.
	 */
	send_CPI(cpumask, VIC_INVALIDATE_CPI);

	while (smp_invalidate_needed) {
		mb();
		if (--stuck == 0) {
			printk("***WARNING*** Stuck doing invalidate CPI "
			       "(CPU%d)\n", smp_processor_id());
			break;
		}
	}

	/* Uncomment only to debug invalidation problems
	 VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
	 */

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
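
/* A compressed walk-through of the shootdown handshake above (an
 * annotation, not part of the original file): the initiator publishes
 * flush_mm/flush_va under tlbstate_lock, sets one bit per target in
 * smp_invalidate_needed, sends VIC_INVALIDATE_CPI, and spins until
 * every target's smp_invalidate_interrupt() has flushed (or left the
 * mm) and cleared its bit -- the `stuck' counter only bounds that
 * spin so a wedged CPU cannot hang the initiator forever. */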
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	unsigned long cpu_mask;

	preempt_disable();

	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
	local_flush_tlb();
	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long cpu_mask;

	preempt_disable();

	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long cpu_mask;

	preempt_disable();

	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}

EXPORT_SYMBOL(flush_tlb_page);
/* enable the requested IRQs */
static void smp_enable_irq_interrupt(void)
{
	__u8 irq;
	__u8 cpu = get_cpu();

	VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu,
		vic_irq_enable_mask[cpu]));

	spin_lock(&vic_irq_lock);
	for (irq = 0; irq < 16; irq++) {
		if (vic_irq_enable_mask[cpu] & (1 << irq))
			enable_local_vic_irq(irq);
	}
	vic_irq_enable_mask[cpu] = 0;
	spin_unlock(&vic_irq_lock);

	put_cpu_no_resched();
}

/*
 * CPU halt call-back
 */
static void smp_stop_cpu_function(void *dummy)
{
	VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	for (;;)
		halt();
}

static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	volatile unsigned long started;
	volatile unsigned long finished;
	int wait;
};

static struct call_data_struct *call_data;
/* execute a thread on a new CPU.  The function to be called must be
 * previously set up.  This is used to schedule a function for
 * execution on all CPUs - set up the function then broadcast a
 * function_interrupt CPI to come here on each CPU */
static void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	/* must take copy of wait because call_data may be replaced
	 * unless the function is waiting for us to finish */
	int wait = call_data->wait;
	__u8 cpu = smp_processor_id();

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	if (!test_and_clear_bit(cpu, &call_data->started)) {
		/* If the bit wasn't set, this could be a replay */
		printk(KERN_WARNING "VOYAGER SMP: CPU %d received call function"
		       " with no call pending\n", cpu);
		return;
	}
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	irq_enter();
	(*func) (info);
	__get_cpu_var(irq_stat).irq_call_count++;
	irq_exit();
	if (wait) {
		mb();
		clear_bit(cpu, &call_data->finished);
	}
}
static int
voyager_smp_call_function_mask(cpumask_t cpumask,
			       void (*func) (void *info), void *info, int wait)
{
	struct call_data_struct data;
	u32 mask = cpus_addr(cpumask)[0];

	mask &= ~(1 << smp_processor_id());

	if (!mask)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.started = mask;
	data.wait = wait;
	if (wait)
		data.finished = mask;

	spin_lock(&call_lock);
	call_data = &data;
	wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	send_CPI(mask, VIC_CALL_FUNCTION_CPI);

	/* Wait for response */
	while (data.started)
		barrier();

	if (wait)
		while (data.finished)
			barrier();

	spin_unlock(&call_lock);

	return 0;
}
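
/* Minimal usage sketch (illustrative, not compiled): running a
 * function on every other online CPU and waiting for completion.
 * example_action() and example_cross_call() are hypothetical helpers,
 * not part of this file. */
#if 0
static void example_action(void *info)
{
	printk("CPU%d says hi\n", smp_processor_id());
}

static void example_cross_call(void)
{
	cpumask_t mask = cpu_online_map;

	cpu_clear(smp_processor_id(), mask);
	/* wait == 1: don't return until every target has run it */
	voyager_smp_call_function_mask(mask, example_action, NULL, 1);
}
#endif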
/* Sorry about the name.  In an APIC based system, the APICs
 * themselves are programmed to send a timer interrupt.  This is used
 * by linux to reschedule the processor.  Voyager doesn't have this,
 * so we use the system clock to interrupt one processor, which in
 * turn, broadcasts a timer CPI to all the others --- we receive that
 * CPI here.  We don't use this actually for counting so losing
 * ticks doesn't matter
 *
 * FIXME: For those CPUs which actually have a local APIC, we could
 * try to use it to trigger this interrupt instead of having to
 * broadcast the timer tick.  Unfortunately, all my pentium DYADs have
 * no local APIC, so I can't do this
 *
 * This function is currently a placeholder and is unused in the code */
fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	wrapper_smp_local_timer_interrupt();
	set_irq_regs(old_regs);
}

/* All of the QUAD interrupt GATES */
fastcall void smp_qic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	ack_QIC_CPI(QIC_TIMER_CPI);
	wrapper_smp_local_timer_interrupt();
	set_irq_regs(old_regs);
}

fastcall void smp_qic_invalidate_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_INVALIDATE_CPI);
	smp_invalidate_interrupt();
}

fastcall void smp_qic_reschedule_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_RESCHEDULE_CPI);
	smp_reschedule_interrupt();
}

fastcall void smp_qic_enable_irq_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
	smp_enable_irq_interrupt();
}

fastcall void smp_qic_call_function_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
	smp_call_function_interrupt();
}
fastcall void smp_vic_cpi_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	__u8 cpu = smp_processor_id();

	if (is_cpu_quad())
		ack_QIC_CPI(VIC_CPI_LEVEL0);
	else
		ack_VIC_CPI(VIC_CPI_LEVEL0);

	if (test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
		wrapper_smp_local_timer_interrupt();
	if (test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
		smp_invalidate_interrupt();
	if (test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
		smp_reschedule_interrupt();
	if (test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
		smp_enable_irq_interrupt();
	if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
		smp_call_function_interrupt();
	set_irq_regs(old_regs);
}
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
		leave_mm(cpu);
}

/* flush the TLB of every active CPU in the system */
void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}

/* used to set up the trampoline for other CPUs when the memory manager
 * is sufficiently up */
void __init smp_alloc_memory(void)
{
	trampoline_base = (__u32) alloc_bootmem_low_pages(PAGE_SIZE);
	if (__pa(trampoline_base) >= 0x93000)
		BUG();
}

/* send a reschedule CPI to one CPU by physical CPU number */
static void voyager_smp_send_reschedule(int cpu)
{
	send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
}
int hard_smp_processor_id(void)
{
	__u8 i;
	__u8 cpumask = inb(VIC_PROC_WHO_AM_I);
	if ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
		return cpumask & 0x1F;

	for (i = 0; i < 8; i++) {
		if (cpumask & (1 << i))
			return i;
	}
	printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask);
	return 0;
}
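
/* Decoding note (annotation): on a Quad card the WHO_AM_I register
 * carries QUAD_IDENTIFIER plus a CPU number in the low five bits, so
 * a raw read of (QUAD_IDENTIFIER | 0x09) decodes to physical CPU 9;
 * otherwise it is a one-hot mask, so a raw 0x08 means "bit 3 set",
 * i.e. physical CPU 3. */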
int safe_smp_processor_id(void)
{
	return hard_smp_processor_id();
}

/* broadcast a halt to all other CPUs */
static void voyager_smp_send_stop(void)
{
	smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
}

/* this function is triggered in time.c when a clock tick fires
 * we need to re-broadcast the tick to all CPUs */
void smp_vic_timer_interrupt(void)
{
	send_CPI_allbutself(VIC_TIMER_CPI);
	smp_local_timer_interrupt();
}
/* local (per CPU) timer interrupt.  It does both profiling and
 * process statistics/rescheduling.
 *
 * We do profiling in every local tick, statistics/rescheduling
 * happen only every 'profiling multiplier' ticks.  The default
 * multiplier is 1 and it can be changed by writing the new multiplier
 * value into /proc/profile.
 */
void smp_local_timer_interrupt(void)
{
	int cpu = smp_processor_id();
	long weight;

	profile_tick(CPU_PROFILING);
	if (--per_cpu(prof_counter, cpu) <= 0) {
		/*
		 * The multiplier may have changed since the last time we got
		 * to this point as a result of the user writing to
		 * /proc/profile.  In this case we need to adjust the APIC
		 * timer accordingly.
		 *
		 * Interrupts are already masked off at this point.
		 */
		per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
		if (per_cpu(prof_counter, cpu) !=
		    per_cpu(prof_old_multiplier, cpu)) {
			/* FIXME: need to update the vic timer tick here */
			per_cpu(prof_old_multiplier, cpu) =
			    per_cpu(prof_counter, cpu);
		}

		update_process_times(user_mode_vm(get_irq_regs()));
	}

	if (((1 << cpu) & voyager_extended_vic_processors) == 0)
		/* only extended VIC processors participate in
		 * interrupt distribution */
		return;

	/*
	 * We take the 'long' return path, and there every subsystem
	 * grabs the appropriate locks (kernel lock/ irq lock).
	 *
	 * we might want to decouple profiling from the 'long path',
	 * and do the profiling totally in assembly.
	 *
	 * Currently this isn't too much of an issue (performance wise),
	 * we can take more than 100K local irqs per second on a 100 MHz P5.
	 */

	if ((++vic_tick[cpu] & 0x7) != 0)
		return;
	/* get here every 8 ticks */

	/* Change our priority to give someone else a chance at getting
	 * the IRQ.  The algorithm goes like this:
	 *
	 * In the VIC, the dynamically routed interrupt is always
	 * handled by the lowest priority eligible (i.e. receiving
	 * interrupts) CPU.  If >1 eligible CPUs are equal lowest, the
	 * lowest processor number gets it.
	 *
	 * The priority of a CPU is controlled by a special per-CPU
	 * VIC priority register which is 3 bits wide 0 being lowest
	 * and 7 highest priority..
	 *
	 * Therefore we subtract the average number of interrupts from
	 * the number we've fielded.  If this number is negative, we
	 * lower the activity count and if it is positive, we raise
	 * it.
	 *
	 * I'm afraid this still leads to odd looking interrupt counts:
	 * the totals are all roughly equal, but the individual ones
	 * look rather skewed.
	 *
	 * FIXME: This algorithm is total crap when mixed with SMP
	 * affinity code since we now try to even up the interrupt
	 * counts when an affinity binding is keeping them on a
	 * particular CPU */
	weight = (vic_intr_count[cpu] * voyager_extended_cpus
		  - vic_intr_total) >> 4;
	weight += 4;
	if (weight > 7)
		weight = 7;
	if (weight < 0)
		weight = 0;

	outb((__u8) weight, VIC_PRIORITY_REGISTER);
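	/* Worked example (annotation): with voyager_extended_cpus == 4,
	 * if this CPU has fielded 1000 interrupts while the system
	 * total is 3600, weight = ((1000 * 4) - 3600) >> 4 = 25, which
	 * the bias and clamp above turn into 7: we have been taking
	 * more than our share, so we raise our priority to steer the
	 * next dynamically routed interrupt at a lower-priority CPU. */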
#ifdef VOYAGER_DEBUG
	if ((vic_tick[cpu] & 0xFFF) == 0) {
		/* print this message roughly every 25 secs */
		printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n",
		       cpu, vic_tick[cpu], weight);
	}
#endif
}

/* setup the profiling timer */
int setup_profiling_timer(unsigned int multiplier)
{
	int i;

	if ((!multiplier))
		return -EINVAL;

	/*
	 * Set the new multiplier for each CPU.  CPUs don't start using the
	 * new values until the next timer interrupt in which they do process
	 * accounting.
	 */
	for (i = 0; i < NR_CPUS; ++i)
		per_cpu(prof_multiplier, i) = multiplier;

	return 0;
}
/* This is a bit of a mess, but forced on us by the genirq changes
 * there's no genirq handler that really does what voyager wants
 * so hack it up with the simple IRQ handler */
static void fastcall handle_vic_irq(unsigned int irq, struct irq_desc *desc)
{
	before_handle_vic_irq(irq);
	handle_simple_irq(irq, desc);
	after_handle_vic_irq(irq);
}

/* The CPIs are handled in the per cpu 8259s, so they must be
 * enabled to be received: FIX: enabling the CPIs in the early
 * boot sequence interferes with bug checking; enable them later
 * in smp_intr_init */
#define VIC_SET_GATE(cpi, vector) \
	set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector))
#define QIC_SET_GATE(cpi, vector) \
	set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))

void __init smp_intr_init(void)
{
	int i;

	/* initialize the per cpu irq mask to all disabled */
	for (i = 0; i < NR_CPUS; i++)
		vic_irq_mask[i] = 0xFFFF;

	VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);

	VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt);
	VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt);

	QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt);
	QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt);
	QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt);
	QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt);
	QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt);

	/* now put the VIC descriptor into the first 48 IRQs
	 *
	 * This is for later: first 16 correspond to PC IRQs; next 16
	 * are Primary MC IRQs and final 16 are Secondary MC IRQs */
	for (i = 0; i < 48; i++)
		set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
}
/* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
 * processor to receive CPI) */
static void send_CPI(__u32 cpuset, __u8 cpi)
{
	int cpu;
	__u32 quad_cpuset = (cpuset & voyager_quad_processors);

	if (cpi < VIC_START_FAKE_CPI) {
		/* fake CPI are only used for booting, so send to the
		 * extended quads as well---Quads must be VIC booted */
		outb((__u8) (cpuset), VIC_CPI_Registers[cpi]);
		return;
	}
	if (quad_cpuset)
		send_QIC_CPI(quad_cpuset, cpi);
	cpuset &= ~quad_cpuset;
	cpuset &= 0xff;		/* only first 8 CPUs valid for VIC CPI */
	if (cpuset == 0)
		return;
	for_each_online_cpu(cpu) {
		if (cpuset & (1 << cpu))
			set_bit(cpi, &vic_cpi_mailbox[cpu]);
	}
	if (cpuset)
		outb((__u8) cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
}
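
/* Worked example (annotation): with voyager_quad_processors == 0x0c
 * (CPUs 2 and 3 on a Quad card), send_CPI(0x0f, VIC_CALL_FUNCTION_CPI)
 * splits into quad_cpuset = 0x0c, delivered through the QIC mailbox
 * lines, and cpuset = 0x03, delivered by flagging the fake CPI in
 * vic_cpi_mailbox[0..1] and raising the real VIC_CPI_LEVEL0. */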
/* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and
 * set the cache line to shared by reading it.
 *
 * DON'T make this inline otherwise the cache line read will be
 * optimised away
 * */
static int ack_QIC_CPI(__u8 cpi)
{
	__u8 cpu = hard_smp_processor_id();

	cpi &= 7;

	outb(1 << cpi, QIC_INTERRUPT_CLEAR1);
	return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
}

static void ack_special_QIC_CPI(__u8 cpi)
{
	switch (cpi) {
	case VIC_CMN_INT:
		outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0);
		break;
	case VIC_SYS_INT:
		outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0);
		break;
	}
	/* also clear at the VIC, just in case (nop for non-extended proc) */
	ack_VIC_CPI(cpi);
}
/* Acknowledge receipt of CPI in the VIC (essentially an EOI) */
static void ack_VIC_CPI(__u8 cpi)
{
#ifdef VOYAGER_DEBUG
	unsigned long flags;
	__u16 isr;
	__u8 cpu = smp_processor_id();

	local_irq_save(flags);
	isr = vic_read_isr();
	if ((isr & (1 << (cpi & 7))) == 0) {
		printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi);
	}
#endif
	/* send specific EOI; the two system interrupts have
	 * bit 4 set for a separate vector but behave as the
	 * corresponding 3 bit intr */
	outb_p(0x60 | (cpi & 7), 0x20);

#ifdef VOYAGER_DEBUG
	if ((vic_read_isr() & (1 << (cpi & 7))) != 0) {
		printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi);
	}
	local_irq_restore(flags);
#endif
}

/* cribbed with thanks from irq.c */
#define __byte(x,y)	(((unsigned char *)&(y))[x])
#define cached_21(cpu)	(__byte(0,vic_irq_mask[cpu]))
#define cached_A1(cpu)	(__byte(1,vic_irq_mask[cpu]))

static unsigned int startup_vic_irq(unsigned int irq)
{
	unmask_vic_irq(irq);

	return 0;
}
/* The enable and disable routines.  This is where we run into
 * conflicting architectural philosophy.  Fundamentally, the voyager
 * architecture does not expect to have to disable interrupts globally
 * (the IRQ controllers belong to each CPU).  The processor masquerade
 * which is used to start the system shouldn't be used in a running OS
 * since it will cause great confusion if two separate CPUs drive to
 * the same IRQ controller (I know, I've tried it).
 *
 * The solution is a variant on the NCR lazy SPL design:
 *
 * 1) To disable an interrupt, do nothing (other than set the
 *    IRQ_DISABLED flag).  This dares the interrupt actually to arrive.
 *
 * 2) If the interrupt dares to come in, raise the local mask against
 *    it (this will result in all the CPU masks being raised
 *    eventually).
 *
 * 3) To enable the interrupt, lower the mask on the local CPU and
 *    broadcast an Interrupt enable CPI which causes all other CPUs to
 *    adjust their masks accordingly.  */
static void unmask_vic_irq(unsigned int irq)
{
	/* linux doesn't do processor-irq affinity, so enable on
	 * all CPUs we know about */
	int cpu = smp_processor_id(), real_cpu;
	__u16 mask = (1 << irq);
	__u32 processorList = 0;
	unsigned long flags;

	VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n",
		irq, cpu, cpu_irq_affinity[cpu]));
	spin_lock_irqsave(&vic_irq_lock, flags);
	for_each_online_cpu(real_cpu) {
		if (!(voyager_extended_vic_processors & (1 << real_cpu)))
			continue;
		if (!(cpu_irq_affinity[real_cpu] & mask)) {
			/* irq has no affinity for this CPU, ignore */
			continue;
		}
		if (real_cpu == cpu) {
			enable_local_vic_irq(irq);
		} else if (vic_irq_mask[real_cpu] & mask) {
			vic_irq_enable_mask[real_cpu] |= mask;
			processorList |= (1 << real_cpu);
		}
	}
	spin_unlock_irqrestore(&vic_irq_lock, flags);
	if (processorList)
		send_CPI(processorList, VIC_ENABLE_IRQ_CPI);
}

static void mask_vic_irq(unsigned int irq)
{
	/* lazy disable, do nothing */
}
static void enable_local_vic_irq(unsigned int irq)
{
	__u8 cpu = smp_processor_id();
	__u16 mask = ~(1 << irq);
	__u16 old_mask = vic_irq_mask[cpu];

	vic_irq_mask[cpu] &= mask;
	if (vic_irq_mask[cpu] == old_mask)
		return;

	VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n",
		irq, cpu));

	if (irq & 8) {
		outb_p(cached_A1(cpu), 0xA1);
		(void)inb_p(0xA1);
	} else {
		outb_p(cached_21(cpu), 0x21);
		(void)inb_p(0x21);
	}
}

static void disable_local_vic_irq(unsigned int irq)
{
	__u8 cpu = smp_processor_id();
	__u16 mask = (1 << irq);
	__u16 old_mask = vic_irq_mask[cpu];

	if (irq == 7)
		return;

	vic_irq_mask[cpu] |= mask;
	if (old_mask == vic_irq_mask[cpu])
		return;

	VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n",
		irq, cpu));

	if (irq & 8) {
		outb_p(cached_A1(cpu), 0xA1);
		(void)inb_p(0xA1);
	} else {
		outb_p(cached_21(cpu), 0x21);
		(void)inb_p(0x21);
	}
}
/* The VIC is level triggered, so the ack can only be issued after the
 * interrupt completes.  However, we do Voyager lazy interrupt
 * handling here: It is an extremely expensive operation to mask an
 * interrupt in the vic, so we merely set a flag (IRQ_DISABLED).  If
 * this interrupt actually comes in, then we mask and ack here to push
 * the interrupt off to another CPU */
static void before_handle_vic_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	__u8 cpu = smp_processor_id();

	_raw_spin_lock(&vic_irq_lock);
	vic_intr_total++;
	vic_intr_count[cpu]++;

	if (!(cpu_irq_affinity[cpu] & (1 << irq))) {
		/* The irq is not in our affinity mask, push it off
		 * onto another CPU */
		VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d "
			"on cpu %d\n", irq, cpu));
		disable_local_vic_irq(irq);
		/* set IRQ_INPROGRESS to prevent the handler in irq.c from
		 * actually calling the interrupt routine */
		desc->status |= IRQ_REPLAY | IRQ_INPROGRESS;
	} else if (desc->status & IRQ_DISABLED) {
		/* Damn, the interrupt actually arrived, do the lazy
		 * disable thing.  The interrupt routine in irq.c will
		 * not handle a IRQ_DISABLED interrupt, so nothing more
		 * need be done here */
		VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n",
			irq, cpu));
		disable_local_vic_irq(irq);
		desc->status |= IRQ_REPLAY;
	} else {
		desc->status &= ~IRQ_REPLAY;
	}

	_raw_spin_unlock(&vic_irq_lock);
}
/* Finish the VIC interrupt: basically mask */
static void after_handle_vic_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;

	_raw_spin_lock(&vic_irq_lock);
	{
		unsigned int status = desc->status & ~IRQ_INPROGRESS;
#ifdef VOYAGER_DEBUG
		__u16 isr;
#endif

		desc->status = status;
		if ((status & IRQ_DISABLED))
			disable_local_vic_irq(irq);
#ifdef VOYAGER_DEBUG
		/* DEBUG: before we ack, check what's in progress */
		isr = vic_read_isr();
		if ((isr & (1 << irq) && !(status & IRQ_REPLAY)) == 0) {
			__u8 cpu = smp_processor_id();
			__u8 real_cpu;
			int mask;	/* Um... initialize me??? --RR */

			printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
			       cpu, irq);
			for_each_possible_cpu(real_cpu, mask) {

				outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
				     VIC_PROCESSOR_ID);
				isr = vic_read_isr();
				if (isr & (1 << irq)) {
					printk
					    ("VOYAGER SMP: CPU%d ack irq %d\n",
					     real_cpu, irq);
					ack_vic_irq(irq);
				}
				outb(cpu, VIC_PROCESSOR_ID);
			}
		}
#endif /* VOYAGER_DEBUG */
		/* as soon as we ack, the interrupt is eligible for
		 * receipt by another CPU so everything must be in
		 * order here */
		ack_vic_irq(irq);
		if (status & IRQ_REPLAY) {
			/* replay is set if we disable the interrupt
			 * in the before_handle_vic_irq() routine, so
			 * clear the in progress bit here to allow the
			 * next CPU to handle this correctly */
			desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS);
		}
#ifdef VOYAGER_DEBUG
		isr = vic_read_isr();
		if ((isr & (1 << irq)) != 0)
			printk("VOYAGER SMP: after_handle_vic_irq() after "
			       "ack irq=%d, isr=0x%x\n", irq, isr);
#endif /* VOYAGER_DEBUG */
	}
	_raw_spin_unlock(&vic_irq_lock);

	/* All code after this point is out of the main path - the IRQ
	 * may be intercepted by another CPU if reasserted */
}
/* Linux processor - interrupt affinity manipulations.
 *
 * For each processor, we maintain a 32 bit irq affinity mask.
 * Initially it is set to all 1's so every processor accepts every
 * interrupt.  In this call, we change the processor's affinity mask:
 *
 * Change from enable to disable:
 *
 * If the interrupt ever comes in to the processor, we will disable it
 * and ack it to push it off to another CPU, so just accept the mask here.
 *
 * Change from disable to enable:
 *
 * change the mask and then do an interrupt enable CPI to re-enable on
 * the selected processors */

void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
{
	/* Only extended processors handle interrupts */
	unsigned long real_mask;
	unsigned long irq_mask = 1 << irq;
	int cpu;

	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;

	if (cpus_addr(mask)[0] == 0)
		/* can't have no CPUs to accept the interrupt -- extremely
		 * bad things will happen */
		return;

	if (irq == 0)
		/* can't change the affinity of the timer IRQ.  This
		 * is due to the constraint in the voyager
		 * architecture that the CPI also comes in on an IRQ
		 * line and we have chosen IRQ0 for this.  If you
		 * raise the mask on this interrupt, the processor
		 * will no longer be able to accept VIC CPIs */
		return;

	if (irq >= 32)
		/* You can only have 32 interrupts in a voyager system
		 * (and 32 only if you have a secondary microchannel
		 * bus) */
		return;

	for_each_online_cpu(cpu) {
		unsigned long cpu_mask = 1 << cpu;

		if (cpu_mask & real_mask) {
			/* enable the interrupt for this cpu */
			cpu_irq_affinity[cpu] |= irq_mask;
		} else {
			/* disable the interrupt for this cpu */
			cpu_irq_affinity[cpu] &= ~irq_mask;
		}
	}
	/* this is magic, we now have the correct affinity maps, so
	 * enable the interrupt.  This will send an enable CPI to
	 * those CPUs who need to enable it in their local masks,
	 * causing them to correct for the new affinity.  If the
	 * interrupt is currently globally disabled, it will simply be
	 * disabled again as it comes in (voyager lazy disable).  If
	 * the affinity map is tightened to disable the interrupt on a
	 * cpu, it will be pushed off when it comes in */
	unmask_vic_irq(irq);
}
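
/* Minimal usage sketch (illustrative, not compiled): pinning IRQ 14
 * (the first IDE channel on a PC) to physical CPUs 0 and 1 only.
 * example_pin_ide_irq() is a hypothetical helper. */
#if 0
static void example_pin_ide_irq(void)
{
	cpumask_t mask = CPU_MASK_NONE;

	cpu_set(0, mask);
	cpu_set(1, mask);
	set_vic_irq_affinity(14, mask);
}
#endif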
static void ack_vic_irq(unsigned int irq)
{
	if (irq & 8) {
		outb(0x62, 0x20);	/* Specific EOI to cascade */
		outb(0x60 | (irq & 7), 0xA0);
	} else {
		outb(0x60 | (irq & 7), 0x20);
	}
}

/* enable the CPIs.  In the VIC, the CPIs are delivered by the 8259
 * but are not vectored by it.  This means that the 8259 mask must be
 * lowered to receive them */
static __init void vic_enable_cpi(void)
{
	__u8 cpu = smp_processor_id();

	/* just take a copy of the current mask (nop for boot cpu) */
	vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id];

	enable_local_vic_irq(VIC_CPI_LEVEL0);
	enable_local_vic_irq(VIC_CPI_LEVEL1);
	/* for sys int and cmn int */
	enable_local_vic_irq(7);

	if (is_cpu_quad()) {
		outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
		outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
		VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n",
			cpu, QIC_CPI_ENABLE));
	}

	VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n",
		cpu, vic_irq_mask[cpu]));
}
void voyager_smp_dump()
{
	int old_cpu = smp_processor_id(), cpu;

	/* dump the interrupt masks of each processor */
	for_each_online_cpu(cpu) {
		__u16 imr, isr, irr;
		unsigned long flags;

		local_irq_save(flags);
		outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
		imr = (inb(0xa1) << 8) | inb(0x21);
		outb(0x0a, 0xa0);
		irr = inb(0xa0) << 8;
		outb(0x0a, 0x20);
		irr |= inb(0x20);
		outb(0x0b, 0xa0);
		isr = inb(0xa0) << 8;
		outb(0x0b, 0x20);
		isr |= inb(0x20);
		outb(old_cpu, VIC_PROCESSOR_ID);
		local_irq_restore(flags);
		printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n",
		       cpu, vic_irq_mask[cpu], imr, irr, isr);

		/* These lines are put in to try to unstick an un ack'd irq */
		if (isr & 0x4) {
			int irq;
			for (irq = 0; irq < 16; irq++) {
				if (isr & (1 << irq)) {
					printk("\tCPU%d: ack irq %d\n",
					       cpu, irq);
					local_irq_save(flags);
					outb(VIC_CPU_MASQUERADE_ENABLE | cpu,
					     VIC_PROCESSOR_ID);
					ack_vic_irq(irq);
					outb(old_cpu, VIC_PROCESSOR_ID);
					local_irq_restore(flags);
				}
			}
		}
	}
}
void smp_voyager_power_off(void *dummy)
{
	if (smp_processor_id() == boot_cpu_id)
		voyager_power_off();
	else
		smp_stop_cpu_function(NULL);
}

static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
{
	/* FIXME: ignore max_cpus for now */
	smp_boot_cpus();
}

static void __cpuinit voyager_smp_prepare_boot_cpu(void)
{
	init_gdt(smp_processor_id());
	switch_to_new_gdt();

	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callout_map);
	cpu_set(smp_processor_id(), cpu_possible_map);
	cpu_set(smp_processor_id(), cpu_present_map);
}
static int __cpuinit voyager_cpu_up(unsigned int cpu)
{
	/* This only works at boot for x86.  See "rewrite" above. */
	if (cpu_isset(cpu, smp_commenced_mask))
		return -ENOSYS;

	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map))
		return -EIO;
	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_isset(cpu, cpu_online_map))
		mb();
	return 0;
}

static void __init voyager_smp_cpus_done(unsigned int max_cpus)
{
	zap_low_mappings();
}

void __init smp_setup_processor_id(void)
{
	current_thread_info()->cpu = hard_smp_processor_id();
	x86_write_percpu(cpu_number, hard_smp_processor_id());
}

struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
	.smp_prepare_cpus = voyager_smp_prepare_cpus,
	.cpu_up = voyager_cpu_up,
	.smp_cpus_done = voyager_smp_cpus_done,

	.smp_send_stop = voyager_smp_send_stop,
	.smp_send_reschedule = voyager_smp_send_reschedule,
	.smp_call_function_mask = voyager_smp_call_function_mask,
};