Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* -*- mode: c; c-basic-offset: 8 -*- */ |
2 | ||
3 | /* Copyright (C) 1999,2001 | |
4 | * | |
5 | * Author: J.E.J.Bottomley@HansenPartnership.com | |
6 | * | |
7 | * linux/arch/i386/kernel/voyager_smp.c | |
8 | * | |
9 | * This file provides all the same external entries as smp.c but uses | |
10 | * the voyager hal to provide the functionality | |
11 | */ | |
153f8057 | 12 | #include <linux/module.h> |
1da177e4 LT |
13 | #include <linux/mm.h> |
14 | #include <linux/kernel_stat.h> | |
15 | #include <linux/delay.h> | |
16 | #include <linux/mc146818rtc.h> | |
17 | #include <linux/cache.h> | |
18 | #include <linux/interrupt.h> | |
1da177e4 LT |
19 | #include <linux/init.h> |
20 | #include <linux/kernel.h> | |
21 | #include <linux/bootmem.h> | |
22 | #include <linux/completion.h> | |
23 | #include <asm/desc.h> | |
24 | #include <asm/voyager.h> | |
25 | #include <asm/vic.h> | |
26 | #include <asm/mtrr.h> | |
27 | #include <asm/pgalloc.h> | |
28 | #include <asm/tlbflush.h> | |
29 | #include <asm/arch_hooks.h> | |
62111195 | 30 | #include <asm/pda.h> |
1da177e4 | 31 | |
1da177e4 LT |
32 | /* TLB state -- visible externally, indexed physically */ |
33 | DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 }; | |
34 | ||
35 | /* CPU IRQ affinity -- set to all ones initially */ | |
36 | static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = ~0UL }; | |
37 | ||
38 | /* per CPU data structure (for /proc/cpuinfo et al), visible externally, | |
39 | * indexed physically */ | |
40 | struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; | |
153f8057 | 41 | EXPORT_SYMBOL(cpu_data); |
1da177e4 LT |
42 | |
43 | /* physical ID of the CPU used to boot the system */ | |
44 | unsigned char boot_cpu_id; | |
45 | ||
46 | /* The memory line addresses for the Quad CPIs */ | |
47 | struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned; | |
48 | ||
49 | /* The masks for the Extended VIC processors, filled in by cat_init */ | |
50 | __u32 voyager_extended_vic_processors = 0; | |
51 | ||
52 | /* Masks for the extended Quad processors which cannot be VIC booted */ | |
53 | __u32 voyager_allowed_boot_processors = 0; | |
54 | ||
55 | /* The mask for the Quad Processors (both extended and non-extended) */ | |
56 | __u32 voyager_quad_processors = 0; | |
57 | ||
58 | /* Total count of live CPUs, used in process.c to display | |
59 | * the CPU information and in irq.c for the per CPU irq | |
60 | * activity count. Finally exported by i386_ksyms.c */ | |
61 | static int voyager_extended_cpus = 1; | |
62 | ||
63 | /* Have we found an SMP box - used by time.c to do the profiling | |
64 | interrupt for timeslicing; do not set to 1 until the per CPU timer | |
65 | interrupt is active */ | |
66 | int smp_found_config = 0; | |
67 | ||
68 | /* Used for the invalidate map that's also checked in the spinlock */ | |
69 | static volatile unsigned long smp_invalidate_needed; | |
70 | ||
71 | /* Bitmask of currently online CPUs - used by setup.c for | |
72 | /proc/cpuinfo, visible externally but still physical */ | |
73 | cpumask_t cpu_online_map = CPU_MASK_NONE; | |
153f8057 | 74 | EXPORT_SYMBOL(cpu_online_map); |
1da177e4 LT |
75 | |
76 | /* Bitmask of CPUs present in the system - exported by i386_ksyms.c, used | |
77 | * by scheduler but indexed physically */ | |
78 | cpumask_t phys_cpu_present_map = CPU_MASK_NONE; | |
79 | ||
80 | ||
81 | /* The internal functions */ | |
82 | static void send_CPI(__u32 cpuset, __u8 cpi); | |
83 | static void ack_CPI(__u8 cpi); | |
84 | static int ack_QIC_CPI(__u8 cpi); | |
85 | static void ack_special_QIC_CPI(__u8 cpi); | |
86 | static void ack_VIC_CPI(__u8 cpi); | |
87 | static void send_CPI_allbutself(__u8 cpi); | |
c771746e JB |
88 | static void mask_vic_irq(unsigned int irq); |
89 | static void unmask_vic_irq(unsigned int irq); | |
1da177e4 LT |
90 | static unsigned int startup_vic_irq(unsigned int irq); |
91 | static void enable_local_vic_irq(unsigned int irq); | |
92 | static void disable_local_vic_irq(unsigned int irq); | |
93 | static void before_handle_vic_irq(unsigned int irq); | |
94 | static void after_handle_vic_irq(unsigned int irq); | |
95 | static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask); | |
96 | static void ack_vic_irq(unsigned int irq); | |
97 | static void vic_enable_cpi(void); | |
98 | static void do_boot_cpu(__u8 cpuid); | |
99 | static void do_quad_bootstrap(void); | |
1da177e4 LT |
100 | |
101 | int hard_smp_processor_id(void); | |
2654c08c | 102 | int safe_smp_processor_id(void); |
1da177e4 LT |
103 | |
104 | /* Inline functions */ | |
105 | static inline void | |
106 | send_one_QIC_CPI(__u8 cpu, __u8 cpi) | |
107 | { | |
108 | voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi = | |
109 | (smp_processor_id() << 16) + cpi; | |
110 | } | |
111 | ||
112 | static inline void | |
113 | send_QIC_CPI(__u32 cpuset, __u8 cpi) | |
114 | { | |
115 | int cpu; | |
116 | ||
117 | for_each_online_cpu(cpu) { | |
118 | if(cpuset & (1<<cpu)) { | |
119 | #ifdef VOYAGER_DEBUG | |
120 | if(!cpu_isset(cpu, cpu_online_map)) | |
121 | VDEBUG(("CPU%d sending cpi %d to CPU%d not in cpu_online_map\n", hard_smp_processor_id(), cpi, cpu)); | |
122 | #endif | |
123 | send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET); | |
124 | } | |
125 | } | |
126 | } | |
127 | ||
6431e6a2 | 128 | static inline void |
7d12e780 | 129 | wrapper_smp_local_timer_interrupt(void) |
6431e6a2 DH |
130 | { |
131 | irq_enter(); | |
7d12e780 | 132 | smp_local_timer_interrupt(); |
6431e6a2 DH |
133 | irq_exit(); |
134 | } | |
135 | ||
1da177e4 LT |
136 | static inline void |
137 | send_one_CPI(__u8 cpu, __u8 cpi) | |
138 | { | |
139 | if(voyager_quad_processors & (1<<cpu)) | |
140 | send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET); | |
141 | else | |
142 | send_CPI(1<<cpu, cpi); | |
143 | } | |
144 | ||
145 | static inline void | |
146 | send_CPI_allbutself(__u8 cpi) | |
147 | { | |
148 | __u8 cpu = smp_processor_id(); | |
149 | __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu); | |
150 | send_CPI(mask, cpi); | |
151 | } | |
152 | ||
153 | static inline int | |
154 | is_cpu_quad(void) | |
155 | { | |
156 | __u8 cpumask = inb(VIC_PROC_WHO_AM_I); | |
157 | return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER); | |
158 | } | |
159 | ||
160 | static inline int | |
161 | is_cpu_extended(void) | |
162 | { | |
163 | __u8 cpu = hard_smp_processor_id(); | |
164 | ||
165 | return(voyager_extended_vic_processors & (1<<cpu)); | |
166 | } | |
167 | ||
168 | static inline int | |
169 | is_cpu_vic_boot(void) | |
170 | { | |
171 | __u8 cpu = hard_smp_processor_id(); | |
172 | ||
173 | return(voyager_extended_vic_processors | |
174 | & voyager_allowed_boot_processors & (1<<cpu)); | |
175 | } | |
176 | ||
177 | ||
178 | static inline void | |
179 | ack_CPI(__u8 cpi) | |
180 | { | |
181 | switch(cpi) { | |
182 | case VIC_CPU_BOOT_CPI: | |
183 | if(is_cpu_quad() && !is_cpu_vic_boot()) | |
184 | ack_QIC_CPI(cpi); | |
185 | else | |
186 | ack_VIC_CPI(cpi); | |
187 | break; | |
188 | case VIC_SYS_INT: | |
189 | case VIC_CMN_INT: | |
190 | /* These are slightly strange. Even on the Quad card, | |
191 | * they are vectored as VIC CPIs */ | |
192 | if(is_cpu_quad()) | |
193 | ack_special_QIC_CPI(cpi); | |
194 | else | |
195 | ack_VIC_CPI(cpi); | |
196 | break; | |
197 | default: | |
198 | printk("VOYAGER ERROR: CPI%d is in common CPI code\n", cpi); | |
199 | break; | |
200 | } | |
201 | } | |
202 | ||
203 | /* local variables */ | |
204 | ||
205 | /* The VIC IRQ descriptors -- these look almost identical to the | |
206 | * 8259 IRQs except that masks and things must be kept per processor | |
207 | */ | |
c771746e JB |
208 | static struct irq_chip vic_chip = { |
209 | .name = "VIC", | |
210 | .startup = startup_vic_irq, | |
211 | .mask = mask_vic_irq, | |
212 | .unmask = unmask_vic_irq, | |
213 | .set_affinity = set_vic_irq_affinity, | |
1da177e4 LT |
214 | }; |
215 | ||
216 | /* used to count up as CPUs are brought on line (starts at 0) */ | |
217 | static int cpucount = 0; | |
218 | ||
219 | /* steal a page from the bottom of memory for the trampoline and | |
220 | * squirrel its address away here. This will be in kernel virtual | |
221 | * space */ | |
222 | static __u32 trampoline_base; | |
223 | ||
224 | /* The per cpu profile stuff - used in smp_local_timer_interrupt */ | |
225 | static DEFINE_PER_CPU(int, prof_multiplier) = 1; | |
226 | static DEFINE_PER_CPU(int, prof_old_multiplier) = 1; | |
227 | static DEFINE_PER_CPU(int, prof_counter) = 1; | |
228 | ||
229 | /* the map used to check if a CPU has booted */ | |
230 | static __u32 cpu_booted_map; | |
231 | ||
232 | /* the synchronize flag used to hold all secondary CPUs spinning in | |
233 | * a tight loop until the boot sequence is ready for them */ | |
234 | static cpumask_t smp_commenced_mask = CPU_MASK_NONE; | |
235 | ||
236 | /* This is for the new dynamic CPU boot code */ | |
237 | cpumask_t cpu_callin_map = CPU_MASK_NONE; | |
238 | cpumask_t cpu_callout_map = CPU_MASK_NONE; | |
153f8057 | 239 | EXPORT_SYMBOL(cpu_callout_map); |
7a8ef1cb | 240 | cpumask_t cpu_possible_map = CPU_MASK_NONE; |
4ad8d383 | 241 | EXPORT_SYMBOL(cpu_possible_map); |
1da177e4 LT |
242 | |
243 | /* The per processor IRQ masks (these are usually kept in sync) */ | |
244 | static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned; | |
245 | ||
246 | /* the list of IRQs to be enabled by the VIC_ENABLE_IRQ_CPI */ | |
247 | static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 }; | |
248 | ||
249 | /* Lock for enable/disable of VIC interrupts */ | |
250 | static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock); | |
251 | ||
252 | /* The boot processor is correctly set up in PC mode when it | |
253 | * comes up, but the secondaries need their master/slave 8259 | |
254 | * pairs initializing correctly */ | |
255 | ||
256 | /* Interrupt counters (per cpu) and total - used to try to | |
257 | * even up the interrupt handling routines */ | |
258 | static long vic_intr_total = 0; | |
259 | static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 }; | |
260 | static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 }; | |
261 | ||
262 | /* Since we can only use CPI0, we fake all the other CPIs */ | |
263 | static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned; | |
264 | ||
265 | /* debugging routine to read the ISR of the CPU's PIC */ | |
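| /* (Illustrative note, not from the original source: writing OCW3 |
| * command 0x0b to an 8259's command port selects its In-Service |
| * Register, so the next read of that port returns the ISR; the |
| * routine below does this for the slave (0xa0) then the master |
| * (0x20) PIC.) */ |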
266 | static inline __u16 | |
267 | vic_read_isr(void) | |
268 | { | |
269 | __u16 isr; | |
270 | ||
271 | outb(0x0b, 0xa0); | |
272 | isr = inb(0xa0) << 8; | |
273 | outb(0x0b, 0x20); | |
274 | isr |= inb(0x20); | |
275 | ||
276 | return isr; | |
277 | } | |
278 | ||
279 | static __init void | |
280 | qic_setup(void) | |
281 | { | |
282 | if(!is_cpu_quad()) { | |
283 | /* not a quad, no setup */ | |
284 | return; | |
285 | } | |
286 | outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0); | |
287 | outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1); | |
288 | ||
289 | if(is_cpu_extended()) { | |
290 | /* the QIC duplicate of the VIC base register */ | |
291 | outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER); | |
292 | outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER); | |
293 | ||
294 | /* FIXME: should set up the QIC timer and memory parity | |
295 | * error vectors here */ | |
296 | } | |
297 | } | |
298 | ||
299 | static __init void | |
300 | vic_setup_pic(void) | |
301 | { | |
302 | outb(1, VIC_REDIRECT_REGISTER_1); | |
303 | /* clear the claim registers for dynamic routing */ | |
304 | outb(0, VIC_CLAIM_REGISTER_0); | |
305 | outb(0, VIC_CLAIM_REGISTER_1); | |
306 | ||
307 | outb(0, VIC_PRIORITY_REGISTER); | |
308 | /* Set the Primary and Secondary Microchannel vector | |
309 | * bases to be the same as the ordinary interrupts | |
310 | * | |
311 | * FIXME: This would be more efficient using separate | |
312 | * vectors. */ | |
313 | outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE); | |
314 | outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE); | |
315 | /* Now initialise the master PIC belonging to this CPU by | |
316 | * sending the four ICWs */ | |
317 | ||
318 | /* ICW1: level triggered, ICW4 needed */ | |
319 | outb(0x19, 0x20); | |
320 | ||
321 | /* ICW2: vector base */ | |
322 | outb(FIRST_EXTERNAL_VECTOR, 0x21); | |
323 | ||
324 | /* ICW3: slave at line 2 */ | |
325 | outb(0x04, 0x21); | |
326 | ||
327 | /* ICW4: 8086 mode */ | |
328 | outb(0x01, 0x21); | |
329 | ||
330 | /* now the same for the slave PIC */ | |
331 | ||
332 | /* ICW1: level trigger, ICW4 needed */ | |
333 | outb(0x19, 0xA0); | |
334 | ||
335 | /* ICW2: slave vector base */ | |
336 | outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1); | |
337 | ||
338 | /* ICW3: slave ID */ | |
339 | outb(0x02, 0xA1); | |
340 | ||
341 | /* ICW4: 8086 mode */ | |
342 | outb(0x01, 0xA1); | |
343 | } | |
344 | ||
345 | static void | |
346 | do_quad_bootstrap(void) | |
347 | { | |
348 | if(is_cpu_quad() && is_cpu_vic_boot()) { | |
349 | int i; | |
350 | unsigned long flags; | |
351 | __u8 cpuid = hard_smp_processor_id(); | |
352 | ||
353 | local_irq_save(flags); | |
354 | ||
355 | for(i = 0; i<4; i++) { | |
356 | /* FIXME: this would be >>3 &0x7 on the 32 way */ | |
357 | if(((cpuid >> 2) & 0x03) == i) | |
358 | /* don't lower our own mask! */ | |
359 | continue; | |
360 | ||
361 | /* masquerade as local Quad CPU */ | |
362 | outb(QIC_CPUID_ENABLE | i, QIC_PROCESSOR_ID); | |
363 | /* enable the startup CPI */ | |
364 | outb(QIC_BOOT_CPI_MASK, QIC_MASK_REGISTER1); | |
365 | /* restore cpu id */ | |
366 | outb(0, QIC_PROCESSOR_ID); | |
367 | } | |
368 | local_irq_restore(flags); | |
369 | } | |
370 | } | |
371 | ||
372 | ||
373 | /* Set up all the basic stuff: read the SMP config and make all the | |
374 | * SMP information reflect only the boot cpu. All others will be | |
375 | * brought on-line later. */ | |
376 | void __init | |
377 | find_smp_config(void) | |
378 | { | |
379 | int i; | |
380 | ||
381 | boot_cpu_id = hard_smp_processor_id(); | |
382 | ||
383 | printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id); | |
384 | ||
385 | /* initialize the CPU structures (moved from smp_boot_cpus) */ | |
386 | for(i=0; i<NR_CPUS; i++) { | |
387 | cpu_irq_affinity[i] = ~0; | |
388 | } | |
389 | cpu_online_map = cpumask_of_cpu(boot_cpu_id); | |
390 | ||
391 | /* The boot CPU must be extended */ | |
392 | voyager_extended_vic_processors = 1<<boot_cpu_id; | |
393 | /* initially, all of the first 8 CPUs can boot */ |
394 | voyager_allowed_boot_processors = 0xff; | |
395 | /* set up everything for just this CPU, we can alter | |
396 | * this as we start the other CPUs later */ | |
397 | /* now get the CPU disposition from the extended CMOS */ | |
398 | cpus_addr(phys_cpu_present_map)[0] = voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK); | |
399 | cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8; | |
400 | cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16; | |
401 | cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24; | |
f68a106f | 402 | cpu_possible_map = phys_cpu_present_map; |
1da177e4 LT |
403 | printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]); |
404 | /* Here we set up the VIC to enable SMP */ | |
405 | /* enable the CPIs by writing the base vector to their register */ | |
406 | outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER); | |
407 | outb(1, VIC_REDIRECT_REGISTER_1); | |
408 | /* set the claim registers for static routing --- Boot CPU gets | |
409 | * all interrupts until all other CPUs have started */ |
410 | outb(0xff, VIC_CLAIM_REGISTER_0); | |
411 | outb(0xff, VIC_CLAIM_REGISTER_1); | |
412 | /* Set the Primary and Secondary Microchannel vector | |
413 | * bases to be the same as the ordinary interrupts | |
414 | * | |
415 | * FIXME: This would be more efficient using separate | |
416 | * vectors. */ | |
417 | outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE); | |
418 | outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE); | |
419 | ||
420 | /* Finally tell the firmware that we're driving */ | |
421 | outb(inb(VOYAGER_SUS_IN_CONTROL_PORT) | VOYAGER_IN_CONTROL_FLAG, | |
422 | VOYAGER_SUS_IN_CONTROL_PORT); | |
423 | ||
424 | current_thread_info()->cpu = boot_cpu_id; | |
62111195 | 425 | write_pda(cpu_number, boot_cpu_id); |
1da177e4 LT |
426 | } |
427 | ||
428 | /* | |
429 | * The bootstrap kernel entry code has set these up. Save them | |
430 | * for a given CPU; id is physical */ |
431 | void __init | |
432 | smp_store_cpu_info(int id) | |
433 | { | |
434 | struct cpuinfo_x86 *c=&cpu_data[id]; | |
435 | ||
436 | *c = boot_cpu_data; | |
437 | ||
438 | identify_cpu(c); | |
439 | } | |
440 | ||
441 | /* set up the trampoline and return the physical address of the code */ | |
442 | static __u32 __init | |
443 | setup_trampoline(void) | |
444 | { | |
445 | /* these two are global symbols in trampoline.S */ | |
446 | extern __u8 trampoline_end[]; | |
447 | extern __u8 trampoline_data[]; | |
448 | ||
449 | memcpy((__u8 *)trampoline_base, trampoline_data, | |
450 | trampoline_end - trampoline_data); | |
451 | return virt_to_phys((__u8 *)trampoline_base); | |
452 | } | |
453 | ||
454 | /* Routine initially called when a non-boot CPU is brought online */ | |
455 | static void __init | |
456 | start_secondary(void *unused) | |
457 | { | |
458 | __u8 cpuid = hard_smp_processor_id(); | |
459 | /* external functions not defined in the headers */ | |
460 | extern void calibrate_delay(void); | |
461 | ||
62111195 | 462 | secondary_cpu_init(); |
1da177e4 LT |
463 | |
464 | /* OK, we're in the routine */ | |
465 | ack_CPI(VIC_CPU_BOOT_CPI); | |
466 | ||
467 | /* setup the 8259 master slave pair belonging to this CPU --- | |
468 | * we won't actually receive any until the boot CPU | |
469 | * relinquishes its static routing mask */ |
470 | vic_setup_pic(); | |
471 | ||
472 | qic_setup(); | |
473 | ||
474 | if(is_cpu_quad() && !is_cpu_vic_boot()) { | |
475 | /* clear the boot CPI */ | |
476 | __u8 dummy; | |
477 | ||
478 | dummy = voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi; | |
479 | printk("read dummy %d\n", dummy); | |
480 | } | |
481 | ||
482 | /* lower the mask to receive CPIs */ | |
483 | vic_enable_cpi(); | |
484 | ||
485 | VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid)); | |
486 | ||
487 | /* enable interrupts */ | |
488 | local_irq_enable(); | |
489 | ||
490 | /* get our bogomips */ | |
491 | calibrate_delay(); | |
492 | ||
493 | /* save our processor parameters */ | |
494 | smp_store_cpu_info(cpuid); | |
495 | ||
496 | /* if we're a quad, we may need to bootstrap other CPUs */ | |
497 | do_quad_bootstrap(); | |
498 | ||
499 | /* FIXME: this is rather a poor hack to prevent the CPU | |
500 | * activating softirqs while it's supposed to be waiting for | |
501 | * permission to proceed. Without this, the new per CPU stuff | |
502 | * in the softirqs will fail */ | |
503 | local_irq_disable(); | |
504 | cpu_set(cpuid, cpu_callin_map); | |
505 | ||
506 | /* signal that we're done */ | |
507 | cpu_booted_map = 1; | |
508 | ||
509 | while (!cpu_isset(cpuid, smp_commenced_mask)) | |
510 | rep_nop(); | |
511 | local_irq_enable(); | |
512 | ||
513 | local_flush_tlb(); | |
514 | ||
515 | cpu_set(cpuid, cpu_online_map); | |
516 | wmb(); | |
517 | cpu_idle(); | |
518 | } | |
519 | ||
520 | ||
521 | /* Routine to kick start the given CPU and wait for it to report ready | |
522 | * (or timeout in startup). When this routine returns, the requested | |
523 | * CPU is either fully running and configured or known to be dead. | |
524 | * | |
525 | * We call this routine sequentially 1 CPU at a time, so no need for | |
526 | * locking */ | |
527 | ||
528 | static void __init | |
529 | do_boot_cpu(__u8 cpu) | |
530 | { | |
531 | struct task_struct *idle; | |
532 | int timeout; | |
533 | unsigned long flags; | |
534 | int quad_boot = (1<<cpu) & voyager_quad_processors | |
535 | & ~( voyager_extended_vic_processors | |
536 | & voyager_allowed_boot_processors); | |
537 | ||
1da177e4 LT |
538 | /* This is an area in head.S which was used to set up the |
539 | * initial kernel stack. We need to alter this to give the | |
540 | * booting CPU a new stack (taken from its idle process) */ | |
541 | extern struct { | |
542 | __u8 *esp; | |
543 | unsigned short ss; | |
544 | } stack_start; | |
545 | /* This is the format of the CPI IDT gate (in real mode) which | |
546 | * we're hijacking to boot the CPU */ | |
547 | union IDTFormat { | |
548 | struct seg { | |
549 | __u16 Offset; | |
550 | __u16 Segment; | |
551 | } idt; | |
552 | __u32 val; | |
553 | } hijack_source; | |
554 | ||
555 | __u32 *hijack_vector; | |
556 | __u32 start_phys_address = setup_trampoline(); | |
557 | ||
558 | /* There's a clever trick to this: The linux trampoline is | |
559 | * compiled to begin at absolute location zero, so make the | |
560 | * address zero but have the data segment selector compensate | |
561 | * for the actual address */ | |
562 | hijack_source.idt.Offset = start_phys_address & 0x000F; | |
563 | hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF; | |
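| /* (Worked example, illustrative: a trampoline copied to physical |
| * 0x90000 gives Offset = 0x0000 and Segment = 0x9000; in real mode |
| * the CPU resolves Segment * 16 + Offset = 0x90000, so execution |
| * lands on the trampoline even though it was linked at zero.) */ |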
564 | ||
565 | cpucount++; | |
d6444514 JB |
566 | alternatives_smp_switch(1); |
567 | ||
1da177e4 LT |
568 | idle = fork_idle(cpu); |
569 | if(IS_ERR(idle)) | |
570 | panic("failed fork for CPU%d", cpu); | |
571 | idle->thread.eip = (unsigned long) start_secondary; | |
572 | /* init_tasks (in sched.c) is indexed logically */ | |
573 | stack_start.esp = (void *) idle->thread.esp; | |
574 | ||
ae1ee11b | 575 | init_gdt(cpu, idle); |
1da177e4 LT |
576 | irq_ctx_init(cpu); |
577 | ||
578 | /* Note: Don't modify initial ss override */ | |
579 | VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu, | |
580 | (unsigned long)hijack_source.val, hijack_source.idt.Segment, | |
581 | hijack_source.idt.Offset, stack_start.esp)); | |
9d0e59a3 EB |
582 | |
583 | /* init lowmem identity mapping */ | |
584 | clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, | |
585 | min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS)); | |
586 | flush_tlb_all(); | |
1da177e4 LT |
587 | |
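| /* (Illustrative note: a real-mode interrupt vector n is a 4-byte |
| * offset:segment pair stored at physical address n * 4, which is why |
| * the hijack vectors below are located with phys_to_virt(vector * 4) |
| * and written with the segment:offset value built above.) */ |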
588 | if(quad_boot) { | |
589 | printk("CPU %d: non extended Quad boot\n", cpu); | |
590 | hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE)*4); | |
591 | *hijack_vector = hijack_source.val; | |
592 | } else { | |
593 | printk("CPU%d: extended VIC boot\n", cpu); | |
594 | hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE)*4); | |
595 | *hijack_vector = hijack_source.val; | |
596 | /* VIC errata, may also receive interrupt at this address */ | |
597 | hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI + VIC_DEFAULT_CPI_BASE)*4); | |
598 | *hijack_vector = hijack_source.val; | |
599 | } | |
600 | /* All non-boot CPUs start with interrupts fully masked. Need | |
601 | * to lower the mask of the CPI we're about to send. We do | |
602 | * this in the VIC by masquerading as the processor we're | |
603 | * about to boot and lowering its interrupt mask */ | |
604 | local_irq_save(flags); | |
605 | if(quad_boot) { | |
606 | send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI); | |
607 | } else { | |
608 | outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID); | |
609 | /* here we're altering registers belonging to `cpu' */ | |
610 | ||
611 | outb(VIC_BOOT_INTERRUPT_MASK, 0x21); | |
612 | /* now go back to our original identity */ | |
613 | outb(boot_cpu_id, VIC_PROCESSOR_ID); | |
614 | ||
615 | /* and boot the CPU */ | |
616 | ||
617 | send_CPI((1<<cpu), VIC_CPU_BOOT_CPI); | |
618 | } | |
619 | cpu_booted_map = 0; | |
620 | local_irq_restore(flags); | |
621 | ||
622 | /* now wait for it to become ready (or timeout) */ | |
623 | for(timeout = 0; timeout < 50000; timeout++) { | |
624 | if(cpu_booted_map) | |
625 | break; | |
626 | udelay(100); | |
627 | } | |
628 | /* reset the page table */ | |
9d0e59a3 | 629 | zap_low_mappings(); |
1da177e4 LT |
630 | |
631 | if (cpu_booted_map) { | |
632 | VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n", | |
633 | cpu, smp_processor_id())); | |
634 | ||
635 | printk("CPU%d: ", cpu); | |
636 | print_cpu_info(&cpu_data[cpu]); | |
637 | wmb(); | |
638 | cpu_set(cpu, cpu_callout_map); | |
3c101cf0 | 639 | cpu_set(cpu, cpu_present_map); |
1da177e4 LT |
640 | } |
641 | else { | |
642 | printk("CPU%d FAILED TO BOOT: ", cpu); | |
643 | if (*((volatile unsigned char *)phys_to_virt(start_phys_address))==0xA5) | |
644 | printk("Stuck.\n"); | |
645 | else | |
646 | printk("Not responding.\n"); | |
647 | ||
648 | cpucount--; | |
649 | } | |
650 | } | |
651 | ||
652 | void __init | |
653 | smp_boot_cpus(void) | |
654 | { | |
655 | int i; | |
656 | ||
657 | /* CAT BUS initialisation must be done after the memory is set up */ |
658 | /* FIXME: The L4 has a catbus too, it just needs to be | |
659 | * accessed in a totally different way */ | |
660 | if(voyager_level == 5) { | |
661 | voyager_cat_init(); | |
662 | ||
663 | /* now that the cat has probed the Voyager System Bus, sanity | |
664 | * check the cpu map */ | |
665 | if( ((voyager_quad_processors | voyager_extended_vic_processors) | |
666 | & cpus_addr(phys_cpu_present_map)[0]) != cpus_addr(phys_cpu_present_map)[0]) { | |
667 | /* should panic */ | |
668 | printk("\n\n***WARNING*** Sanity check of CPU present map FAILED\n"); | |
669 | } | |
670 | } else if(voyager_level == 4) | |
671 | voyager_extended_vic_processors = cpus_addr(phys_cpu_present_map)[0]; | |
672 | ||
673 | /* this sets up the idle task to run on the current cpu */ | |
674 | voyager_extended_cpus = 1; | |
675 | /* Remove the global_irq_holder setting, it triggers a BUG() on | |
676 | * schedule at the moment */ | |
677 | //global_irq_holder = boot_cpu_id; | |
678 | ||
679 | /* FIXME: Need to do something about this but currently only works | |
680 | * on CPUs with a tsc which none of mine have. | |
681 | smp_tune_scheduling(); | |
682 | */ | |
683 | smp_store_cpu_info(boot_cpu_id); | |
684 | printk("CPU%d: ", boot_cpu_id); | |
685 | print_cpu_info(&cpu_data[boot_cpu_id]); | |
686 | ||
687 | if(is_cpu_quad()) { | |
688 | /* booting on a Quad CPU */ | |
689 | printk("VOYAGER SMP: Boot CPU is Quad\n"); | |
690 | qic_setup(); | |
691 | do_quad_bootstrap(); | |
692 | } | |
693 | ||
694 | /* enable our own CPIs */ | |
695 | vic_enable_cpi(); | |
696 | ||
697 | cpu_set(boot_cpu_id, cpu_online_map); | |
698 | cpu_set(boot_cpu_id, cpu_callout_map); | |
699 | ||
700 | /* loop over all the extended VIC CPUs and boot them. The | |
701 | * Quad CPUs must be bootstrapped by their extended VIC cpu */ | |
702 | for(i = 0; i < NR_CPUS; i++) { | |
703 | if(i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map)) | |
704 | continue; | |
705 | do_boot_cpu(i); | |
706 | /* This udelay seems to be needed for the Quad boots; | |
707 | * don't remove unless you know what you're doing */ | |
708 | udelay(1000); | |
709 | } | |
710 | /* we could compute the total bogomips here, but why bother? | |
711 | * Code added from smpboot.c */ | |
712 | { | |
713 | unsigned long bogosum = 0; | |
714 | for (i = 0; i < NR_CPUS; i++) | |
715 | if (cpu_isset(i, cpu_online_map)) | |
716 | bogosum += cpu_data[i].loops_per_jiffy; | |
717 | printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", | |
718 | cpucount+1, | |
719 | bogosum/(500000/HZ), | |
720 | (bogosum/(5000/HZ))%100); | |
721 | } | |
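| /* (Worked example, illustrative, assuming HZ == 100: a CPU with |
| * loops_per_jiffy == 2500000 contributes 2500000/(500000/100) == 500 |
| * whole BogoMIPS, and (2500000/(5000/100)) % 100 == 0 for the two |
| * decimal places, i.e. "500.00 BogoMIPS".) */ |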
722 | voyager_extended_cpus = hweight32(voyager_extended_vic_processors); | |
723 | printk("VOYAGER: Extended (interrupt handling CPUs): %d, non-extended: %d\n", voyager_extended_cpus, num_booting_cpus() - voyager_extended_cpus); | |
724 | /* that's it, switch to symmetric mode */ | |
725 | outb(0, VIC_PRIORITY_REGISTER); | |
726 | outb(0, VIC_CLAIM_REGISTER_0); | |
727 | outb(0, VIC_CLAIM_REGISTER_1); | |
728 | ||
729 | VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus())); | |
730 | } | |
731 | ||
732 | /* Reload the secondary CPU's task structure (this function does not | |
733 | * return) */ |
734 | void __init | |
735 | initialize_secondary(void) | |
736 | { | |
737 | #if 0 | |
738 | // AC kernels only | |
739 | set_current(hard_get_current()); | |
740 | #endif | |
741 | ||
742 | /* | |
743 | * We don't actually need to load the full TSS, | |
744 | * basically just the stack pointer and the eip. | |
745 | */ | |
746 | ||
747 | asm volatile( | |
748 | "movl %0,%%esp\n\t" | |
749 | "jmp *%1" | |
750 | : | |
751 | :"r" (current->thread.esp),"r" (current->thread.eip)); | |
752 | } | |
753 | ||
754 | /* handle a Voyager SYS_INT -- If we don't, the base board will | |
755 | * panic the system. | |
756 | * | |
757 | * System interrupts occur because some problem was detected on the | |
758 | * various busses. To find out what you have to probe all the | |
759 | * hardware via the CAT bus. FIXME: At the moment we do nothing. */ | |
760 | fastcall void | |
761 | smp_vic_sys_interrupt(struct pt_regs *regs) | |
762 | { | |
763 | ack_CPI(VIC_SYS_INT); | |
7d12e780 | 764 | printk("Voyager SYSTEM INTERRUPT\n"); |
1da177e4 LT |
765 | } |
766 | ||
767 | /* Handle a Voyager CMN_INT; these interrupts occur either because of | |
768 | * a system status change or because a single bit memory error | |
769 | * occurred. FIXME: At the moment, ignore all this. */ | |
770 | fastcall void | |
771 | smp_vic_cmn_interrupt(struct pt_regs *regs) | |
772 | { | |
773 | static __u8 in_cmn_int = 0; | |
774 | static DEFINE_SPINLOCK(cmn_int_lock); | |
775 | ||
776 | /* common ints are broadcast, so make sure we only do this once */ | |
777 | _raw_spin_lock(&cmn_int_lock); | |
778 | if(in_cmn_int) | |
779 | goto unlock_end; | |
780 | ||
781 | in_cmn_int++; | |
782 | _raw_spin_unlock(&cmn_int_lock); | |
783 | ||
784 | VDEBUG(("Voyager COMMON INTERRUPT\n")); | |
785 | ||
786 | if(voyager_level == 5) | |
787 | voyager_cat_do_common_interrupt(); | |
788 | ||
789 | _raw_spin_lock(&cmn_int_lock); | |
790 | in_cmn_int = 0; | |
791 | unlock_end: | |
792 | _raw_spin_unlock(&cmn_int_lock); | |
793 | ack_CPI(VIC_CMN_INT); | |
794 | } | |
795 | ||
796 | /* | |
797 | * Reschedule call back. Nothing to do, all the work is done | |
798 | * automatically when we return from the interrupt. */ | |
799 | static void | |
800 | smp_reschedule_interrupt(void) | |
801 | { | |
802 | /* do nothing */ | |
803 | } | |
804 | ||
805 | static struct mm_struct * flush_mm; | |
806 | static unsigned long flush_va; | |
807 | static DEFINE_SPINLOCK(tlbstate_lock); | |
808 | #define FLUSH_ALL 0xffffffff | |
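| /* (Protocol note, inferred from the code below: the initiating CPU |
| * takes tlbstate_lock, publishes flush_mm/flush_va, sets the target |
| * bits in smp_invalidate_needed and sends VIC_INVALIDATE_CPI; each |
| * target flushes (or leaves the mm) and clears its bit, while the |
| * initiator spins until the mask drains or its 50000-iteration |
| * timeout fires.) */ |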
809 | ||
810 | /* | |
811 | * We cannot call mmdrop() because we are in interrupt context, | |
812 | * instead update mm->cpu_vm_mask. | |
813 | * | |
814 | * We need to reload %cr3 since the page tables may be going | |
815 | * away from under us.. | |
816 | */ | |
817 | static inline void | |
818 | leave_mm (unsigned long cpu) | |
819 | { | |
820 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) | |
821 | BUG(); | |
822 | cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask); | |
823 | load_cr3(swapper_pg_dir); | |
824 | } | |
825 | ||
826 | ||
827 | /* | |
828 | * Invalidate call-back | |
829 | */ | |
830 | static void | |
831 | smp_invalidate_interrupt(void) | |
832 | { | |
833 | __u8 cpu = smp_processor_id(); | |
834 | ||
835 | if (!test_bit(cpu, &smp_invalidate_needed)) | |
836 | return; | |
837 | /* This will flood messages. Don't uncomment unless you see | |
838 | * problems with cross-CPU invalidation | |
839 | VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n", | |
840 | smp_processor_id())); | |
841 | */ | |
842 | ||
843 | if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) { | |
844 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) { | |
845 | if (flush_va == FLUSH_ALL) | |
846 | local_flush_tlb(); | |
847 | else | |
848 | __flush_tlb_one(flush_va); | |
849 | } else | |
850 | leave_mm(cpu); | |
851 | } | |
852 | smp_mb__before_clear_bit(); | |
853 | clear_bit(cpu, &smp_invalidate_needed); | |
854 | smp_mb__after_clear_bit(); | |
855 | } | |
856 | ||
857 | /* All the new flush operations for 2.4 */ | |
858 | ||
859 | ||
860 | /* This routine is called with a physical cpu mask */ | |
861 | static void | |
862 | flush_tlb_others (unsigned long cpumask, struct mm_struct *mm, | |
863 | unsigned long va) | |
864 | { | |
865 | int stuck = 50000; | |
866 | ||
867 | if (!cpumask) | |
868 | BUG(); | |
869 | if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask) | |
870 | BUG(); | |
871 | if (cpumask & (1 << smp_processor_id())) | |
872 | BUG(); | |
873 | if (!mm) | |
874 | BUG(); | |
875 | ||
876 | spin_lock(&tlbstate_lock); | |
877 | ||
878 | flush_mm = mm; | |
879 | flush_va = va; | |
880 | atomic_set_mask(cpumask, &smp_invalidate_needed); | |
881 | /* | |
882 | * We have to send the CPI only to | |
883 | * CPUs affected. | |
884 | */ | |
885 | send_CPI(cpumask, VIC_INVALIDATE_CPI); | |
886 | ||
887 | while (smp_invalidate_needed) { | |
888 | mb(); | |
889 | if(--stuck == 0) { | |
890 | printk("***WARNING*** Stuck doing invalidate CPI (CPU%d)\n", smp_processor_id()); | |
891 | break; | |
892 | } | |
893 | } | |
894 | ||
895 | /* Uncomment only to debug invalidation problems | |
896 | VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu)); | |
897 | */ | |
898 | ||
899 | flush_mm = NULL; | |
900 | flush_va = 0; | |
901 | spin_unlock(&tlbstate_lock); | |
902 | } | |
903 | ||
904 | void | |
905 | flush_tlb_current_task(void) | |
906 | { | |
907 | struct mm_struct *mm = current->mm; | |
908 | unsigned long cpu_mask; | |
909 | ||
910 | preempt_disable(); | |
911 | ||
912 | cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); | |
913 | local_flush_tlb(); | |
914 | if (cpu_mask) | |
915 | flush_tlb_others(cpu_mask, mm, FLUSH_ALL); | |
916 | ||
917 | preempt_enable(); | |
918 | } | |
919 | ||
920 | ||
921 | void | |
922 | flush_tlb_mm (struct mm_struct * mm) | |
923 | { | |
924 | unsigned long cpu_mask; | |
925 | ||
926 | preempt_disable(); | |
927 | ||
928 | cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); | |
929 | ||
930 | if (current->active_mm == mm) { | |
931 | if (current->mm) | |
932 | local_flush_tlb(); | |
933 | else | |
934 | leave_mm(smp_processor_id()); | |
935 | } | |
936 | if (cpu_mask) | |
937 | flush_tlb_others(cpu_mask, mm, FLUSH_ALL); | |
938 | ||
939 | preempt_enable(); | |
940 | } | |
941 | ||
942 | void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) | |
943 | { | |
944 | struct mm_struct *mm = vma->vm_mm; | |
945 | unsigned long cpu_mask; | |
946 | ||
947 | preempt_disable(); | |
948 | ||
949 | cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); | |
950 | if (current->active_mm == mm) { | |
951 | if(current->mm) | |
952 | __flush_tlb_one(va); | |
953 | else | |
954 | leave_mm(smp_processor_id()); | |
955 | } | |
956 | ||
957 | if (cpu_mask) | |
958 | flush_tlb_others(cpu_mask, mm, va); | |
959 | ||
960 | preempt_enable(); | |
961 | } | |
153f8057 | 962 | EXPORT_SYMBOL(flush_tlb_page); |
1da177e4 LT |
963 | |
964 | /* enable the requested IRQs */ | |
965 | static void | |
966 | smp_enable_irq_interrupt(void) | |
967 | { | |
968 | __u8 irq; | |
969 | __u8 cpu = get_cpu(); | |
970 | ||
971 | VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu, | |
972 | vic_irq_enable_mask[cpu])); | |
973 | ||
974 | spin_lock(&vic_irq_lock); | |
975 | for(irq = 0; irq < 16; irq++) { | |
976 | if(vic_irq_enable_mask[cpu] & (1<<irq)) | |
977 | enable_local_vic_irq(irq); | |
978 | } | |
979 | vic_irq_enable_mask[cpu] = 0; | |
980 | spin_unlock(&vic_irq_lock); | |
981 | ||
982 | put_cpu_no_resched(); | |
983 | } | |
984 | ||
985 | /* | |
986 | * CPU halt call-back | |
987 | */ | |
988 | static void | |
989 | smp_stop_cpu_function(void *dummy) | |
990 | { | |
991 | VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id())); | |
992 | cpu_clear(smp_processor_id(), cpu_online_map); | |
993 | local_irq_disable(); | |
994 | for(;;) | |
f2ab4461 | 995 | halt(); |
1da177e4 LT |
996 | } |
997 | ||
998 | static DEFINE_SPINLOCK(call_lock); | |
999 | ||
1000 | struct call_data_struct { | |
1001 | void (*func) (void *info); | |
1002 | void *info; | |
1003 | volatile unsigned long started; | |
1004 | volatile unsigned long finished; | |
1005 | int wait; | |
1006 | }; | |
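| /* (Protocol note, inferred from the code below: started and finished |
| * are physical-CPU bitmasks; each target clears its bit in started |
| * when it picks up the call and in finished when the function |
| * returns, while the initiator spins until each mask reaches zero.) */ |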
1007 | ||
1008 | static struct call_data_struct * call_data; | |
1009 | ||
1010 | /* execute a thread on a new CPU. The function to be called must be | |
1011 | * previously set up. This is used to schedule a function for | |
1012 | * execution on all CPUs - set up the function, then broadcast a | |
1013 | * function_interrupt CPI to come here on each CPU */ | |
1014 | static void | |
1015 | smp_call_function_interrupt(void) | |
1016 | { | |
1017 | void (*func) (void *info) = call_data->func; | |
1018 | void *info = call_data->info; | |
1019 | /* must take copy of wait because call_data may be replaced | |
1020 | * unless the function is waiting for us to finish */ | |
1021 | int wait = call_data->wait; | |
1022 | __u8 cpu = smp_processor_id(); | |
1023 | ||
1024 | /* | |
1025 | * Notify initiating CPU that I've grabbed the data and am | |
1026 | * about to execute the function | |
1027 | */ | |
1028 | mb(); | |
1029 | if(!test_and_clear_bit(cpu, &call_data->started)) { | |
1030 | /* If the bit wasn't set, this could be a replay */ | |
1031 | printk(KERN_WARNING "VOYAGER SMP: CPU %d received call function with no call pending\n", cpu); | |
1032 | return; | |
1033 | } | |
1034 | /* | |
1035 | * At this point the info structure may be out of scope unless wait==1 | |
1036 | */ | |
1037 | irq_enter(); | |
1038 | (*func)(info); | |
1039 | irq_exit(); | |
1040 | if (wait) { | |
1041 | mb(); | |
1042 | clear_bit(cpu, &call_data->finished); | |
1043 | } | |
1044 | } | |
1045 | ||
0293ca81 JB |
1046 | static int |
1047 | __smp_call_function_mask (void (*func) (void *info), void *info, int retry, | |
1048 | int wait, __u32 mask) | |
1da177e4 LT |
1049 | { |
1050 | struct call_data_struct data; | |
1da177e4 LT |
1051 | |
1052 | mask &= ~(1<<smp_processor_id()); | |
1053 | ||
1054 | if (!mask) | |
1055 | return 0; | |
1056 | ||
1057 | /* Can deadlock when called with interrupts disabled */ | |
1058 | WARN_ON(irqs_disabled()); | |
1059 | ||
1060 | data.func = func; | |
1061 | data.info = info; | |
1062 | data.started = mask; | |
1063 | data.wait = wait; | |
1064 | if (wait) | |
1065 | data.finished = mask; | |
1066 | ||
1067 | spin_lock(&call_lock); | |
1068 | call_data = &data; | |
1069 | wmb(); | |
1070 | /* Send a message to all other CPUs and wait for them to respond */ | |
0293ca81 | 1071 | send_CPI(mask, VIC_CALL_FUNCTION_CPI); |
1da177e4 LT |
1072 | |
1073 | /* Wait for response */ | |
1074 | while (data.started) | |
1075 | barrier(); | |
1076 | ||
1077 | if (wait) | |
1078 | while (data.finished) | |
1079 | barrier(); | |
1080 | ||
1081 | spin_unlock(&call_lock); | |
1082 | ||
1083 | return 0; | |
1084 | } | |
0293ca81 JB |
1085 | |
1086 | /* Call this function on all CPUs using the function_interrupt above | |
1087 | <func> The function to run. This must be fast and non-blocking. | |
1088 | <info> An arbitrary pointer to pass to the function. | |
1089 | <retry> If true, keep retrying until ready. | |
1090 | <wait> If true, wait until function has completed on other CPUs. | |
1091 | [RETURNS] 0 on success, else a negative status code. Does not return until | |
1092 | remote CPUs are nearly ready to execute <func> or have already executed it. | |
1093 | */ | |
1094 | int | |
1095 | smp_call_function(void (*func) (void *info), void *info, int retry, | |
1096 | int wait) | |
1097 | { | |
1098 | __u32 mask = cpus_addr(cpu_online_map)[0]; | |
1099 | ||
1100 | return __smp_call_function_mask(func, info, retry, wait, mask); | |
1101 | } | |
153f8057 | 1102 | EXPORT_SYMBOL(smp_call_function); |
1da177e4 | 1103 | |
0293ca81 JB |
1104 | /* |
1105 | * smp_call_function_single - Run a function on another CPU | |
1106 | * @func: The function to run. This must be fast and non-blocking. | |
1107 | * @info: An arbitrary pointer to pass to the function. | |
1108 | * @nonatomic: Currently unused. | |
1109 | * @wait: If true, wait until function has completed on other CPUs. | |
1110 | * | |
1111 | * Returns 0 on success, else a negative status code. | |
1112 | * | |
1113 | * Does not return until the remote CPU is nearly ready to execute <func> | |
1114 | * or has already executed it. | |
1115 | */ | |
1116 | ||
1117 | int | |
1118 | smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |
1119 | int nonatomic, int wait) | |
1120 | { | |
1121 | __u32 mask = 1 << cpu; | |
1122 | ||
1123 | return __smp_call_function_mask(func, info, nonatomic, wait, mask); | |
1124 | } | |
1125 | EXPORT_SYMBOL(smp_call_function_single); | |
1126 | ||
1da177e4 LT |
1127 | /* Sorry about the name. In an APIC based system, the APICs |
1128 | * themselves are programmed to send a timer interrupt. This is used | |
1129 | * by linux to reschedule the processor. Voyager doesn't have this, | |
1130 | * so we use the system clock to interrupt one processor, which in | |
1131 | * turn, broadcasts a timer CPI to all the others --- we receive that | |
1132 | * CPI here. We don't actually use this for counting, so losing | |
1133 | * ticks doesn't matter | |
1134 | * | |
1135 | * FIXME: For those CPUs which actually have a local APIC, we could | |
1136 | * try to use it to trigger this interrupt instead of having to | |
1137 | * broadcast the timer tick. Unfortunately, all my pentium DYADs have | |
1138 | * no local APIC, so I can't do this | |
1139 | * | |
1140 | * This function is currently a placeholder and is unused in the code */ | |
1141 | fastcall void | |
1142 | smp_apic_timer_interrupt(struct pt_regs *regs) | |
1143 | { | |
7d12e780 DH |
1144 | struct pt_regs *old_regs = set_irq_regs(regs); |
1145 | wrapper_smp_local_timer_interrupt(); | |
1146 | set_irq_regs(old_regs); | |
1da177e4 LT |
1147 | } |
1148 | ||
1149 | /* All of the QUAD interrupt GATES */ | |
1150 | fastcall void | |
1151 | smp_qic_timer_interrupt(struct pt_regs *regs) | |
1152 | { | |
7d12e780 | 1153 | struct pt_regs *old_regs = set_irq_regs(regs); |
81c06b10 JB |
1154 | ack_QIC_CPI(QIC_TIMER_CPI); |
1155 | wrapper_smp_local_timer_interrupt(); | |
7d12e780 | 1156 | set_irq_regs(old_regs); |
1da177e4 LT |
1157 | } |
1158 | ||
1159 | fastcall void | |
1160 | smp_qic_invalidate_interrupt(struct pt_regs *regs) | |
1161 | { | |
1162 | ack_QIC_CPI(QIC_INVALIDATE_CPI); | |
1163 | smp_invalidate_interrupt(); | |
1164 | } | |
1165 | ||
1166 | fastcall void | |
1167 | smp_qic_reschedule_interrupt(struct pt_regs *regs) | |
1168 | { | |
1169 | ack_QIC_CPI(QIC_RESCHEDULE_CPI); | |
1170 | smp_reschedule_interrupt(); | |
1171 | } | |
1172 | ||
1173 | fastcall void | |
1174 | smp_qic_enable_irq_interrupt(struct pt_regs *regs) | |
1175 | { | |
1176 | ack_QIC_CPI(QIC_ENABLE_IRQ_CPI); | |
1177 | smp_enable_irq_interrupt(); | |
1178 | } | |
1179 | ||
1180 | fastcall void | |
1181 | smp_qic_call_function_interrupt(struct pt_regs *regs) | |
1182 | { | |
1183 | ack_QIC_CPI(QIC_CALL_FUNCTION_CPI); | |
1184 | smp_call_function_interrupt(); | |
1185 | } | |
1186 | ||
1187 | fastcall void | |
1188 | smp_vic_cpi_interrupt(struct pt_regs *regs) | |
1189 | { | |
7d12e780 | 1190 | struct pt_regs *old_regs = set_irq_regs(regs); |
1da177e4 LT |
1191 | __u8 cpu = smp_processor_id(); |
1192 | ||
1193 | if(is_cpu_quad()) | |
1194 | ack_QIC_CPI(VIC_CPI_LEVEL0); | |
1195 | else | |
1196 | ack_VIC_CPI(VIC_CPI_LEVEL0); | |
1197 | ||
1198 | if(test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu])) | |
7d12e780 | 1199 | wrapper_smp_local_timer_interrupt(); |
1da177e4 LT |
1200 | if(test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu])) |
1201 | smp_invalidate_interrupt(); | |
1202 | if(test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu])) | |
1203 | smp_reschedule_interrupt(); | |
1204 | if(test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu])) | |
1205 | smp_enable_irq_interrupt(); | |
1206 | if(test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu])) | |
1207 | smp_call_function_interrupt(); | |
7d12e780 | 1208 | set_irq_regs(old_regs); |
1da177e4 LT |
1209 | } |
1210 | ||
1211 | static void | |
1212 | do_flush_tlb_all(void* info) | |
1213 | { | |
1214 | unsigned long cpu = smp_processor_id(); | |
1215 | ||
1216 | __flush_tlb_all(); | |
1217 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY) | |
1218 | leave_mm(cpu); | |
1219 | } | |
1220 | ||
1221 | ||
1222 | /* flush the TLB of every active CPU in the system */ | |
1223 | void | |
1224 | flush_tlb_all(void) | |
1225 | { | |
1226 | on_each_cpu(do_flush_tlb_all, 0, 1, 1); | |
1227 | } | |
1228 | ||
1229 | /* used to set up the trampoline for other CPUs when the memory manager | |
1230 | * is sorted out */ | |
1231 | void __init | |
1232 | smp_alloc_memory(void) | |
1233 | { | |
1234 | trampoline_base = (__u32)alloc_bootmem_low_pages(PAGE_SIZE); | |
1235 | if(__pa(trampoline_base) >= 0x93000) | |
1236 | BUG(); | |
1237 | } | |
1238 | ||
1239 | /* send a reschedule CPI to one CPU by physical CPU number */ |
1240 | void | |
1241 | smp_send_reschedule(int cpu) | |
1242 | { | |
1243 | send_one_CPI(cpu, VIC_RESCHEDULE_CPI); | |
1244 | } | |
1245 | ||
1246 | ||
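| /* (Note, inferred from the code below: the WHO_AM_I port answers in |
| * two formats -- a Quad CPU reads QUAD_IDENTIFIER plus its CPU number |
| * in the low five bits, while a plain VIC CPU reads a one-hot |
| * bitmask, hence the bit scan in the fallback path.) */ |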
1247 | int | |
1248 | hard_smp_processor_id(void) | |
1249 | { | |
1250 | __u8 i; | |
1251 | __u8 cpumask = inb(VIC_PROC_WHO_AM_I); | |
1252 | if((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER) | |
1253 | return cpumask & 0x1F; | |
1254 | ||
1255 | for(i = 0; i < 8; i++) { | |
1256 | if(cpumask & (1<<i)) | |
1257 | return i; | |
1258 | } | |
1259 | printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask); | |
1260 | return 0; | |
1261 | } | |
1262 | ||
2654c08c FV |
1263 | int |
1264 | safe_smp_processor_id(void) | |
1265 | { | |
1266 | return hard_smp_processor_id(); | |
1267 | } | |
1268 | ||
1da177e4 LT |
1269 | /* broadcast a halt to all other CPUs */ |
1270 | void | |
1271 | smp_send_stop(void) | |
1272 | { | |
1273 | smp_call_function(smp_stop_cpu_function, NULL, 1, 1); | |
1274 | } | |
1275 | ||
1276 | /* this function is triggered in time.c when a clock tick fires; | |
1277 | * we need to re-broadcast the tick to all CPUs */ | |
1278 | void | |
81c06b10 | 1279 | smp_vic_timer_interrupt(void) |
1da177e4 LT |
1280 | { |
1281 | send_CPI_allbutself(VIC_TIMER_CPI); | |
7d12e780 | 1282 | smp_local_timer_interrupt(); |
1da177e4 LT |
1283 | } |
1284 | ||
1da177e4 LT |
1285 | /* local (per CPU) timer interrupt. It does both profiling and |
1286 | * process statistics/rescheduling. | |
1287 | * | |
1288 | * We do profiling in every local tick, statistics/rescheduling | |
1289 | * happen only every 'profiling multiplier' ticks. The default | |
1290 | * multiplier is 1 and it can be changed by writing the new multiplier | |
1291 | * value into /proc/profile. | |
1292 | */ | |
1293 | void | |
7d12e780 | 1294 | smp_local_timer_interrupt(void) |
1da177e4 LT |
1295 | { |
1296 | int cpu = smp_processor_id(); | |
1297 | long weight; | |
1298 | ||
7d12e780 | 1299 | profile_tick(CPU_PROFILING); |
1da177e4 LT |
1300 | if (--per_cpu(prof_counter, cpu) <= 0) { |
1301 | /* | |
1302 | * The multiplier may have changed since the last time we got | |
1303 | * to this point as a result of the user writing to | |
1304 | * /proc/profile. In this case we need to adjust the APIC | |
1305 | * timer accordingly. | |
1306 | * | |
1307 | * Interrupts are already masked off at this point. | |
1308 | */ | |
1309 | per_cpu(prof_counter,cpu) = per_cpu(prof_multiplier, cpu); | |
1310 | if (per_cpu(prof_counter, cpu) != | |
1311 | per_cpu(prof_old_multiplier, cpu)) { | |
1312 | /* FIXME: need to update the vic timer tick here */ | |
1313 | per_cpu(prof_old_multiplier, cpu) = | |
1314 | per_cpu(prof_counter, cpu); | |
1315 | } | |
1316 | ||
81c06b10 | 1317 | update_process_times(user_mode_vm(get_irq_regs())); |
1da177e4 LT |
1318 | } |
1319 | ||
1320 | if( ((1<<cpu) & voyager_extended_vic_processors) == 0) | |
1321 | /* only extended VIC processors participate in | |
1322 | * interrupt distribution */ | |
1323 | return; | |
1324 | ||
1325 | /* | |
1326 | * We take the 'long' return path, and there every subsystem | |
1327 | * grabs the appropriate locks (kernel lock / irq lock). | |
1328 | * | |
1329 | * we might want to decouple profiling from the 'long path', | |
1330 | * and do the profiling totally in assembly. | |
1331 | * | |
1332 | * Currently this isn't too much of an issue (performance wise), | |
1333 | * we can take more than 100K local irqs per second on a 100 MHz P5. | |
1334 | */ | |
1335 | ||
1336 | if((++vic_tick[cpu] & 0x7) != 0) | |
1337 | return; | |
1338 | /* get here every 16 ticks (about every 1/6 of a second) */ | |
1339 | ||
1340 | /* Change our priority to give someone else a chance at getting | |
1341 | * the IRQ. The algorithm goes like this: | |
1342 | * | |
1343 | * In the VIC, the dynamically routed interrupt is always | |
1344 | * handled by the lowest priority eligible (i.e. receiving | |
1345 | * interrupts) CPU. If >1 eligible CPUs are equal lowest, the | |
1346 | * lowest processor number gets it. | |
1347 | * | |
1348 | * The priority of a CPU is controlled by a special per-CPU | |
1349 | * VIC priority register which is 3 bits wide, 0 being lowest | |
1350 | * and 7 the highest priority. | |
1351 | * | |
1352 | * Therefore we subtract the average number of interrupts from | |
1353 | * the number we've fielded. If this number is negative, we | |
1354 | * lower the activity count and if it is positive, we raise | |
1355 | * it. | |
1356 | * | |
1357 | * I'm afraid this still leads to odd-looking interrupt counts: | |
1358 | * the totals are all roughly equal, but the individual ones | |
1359 | * look rather skewed. | |
1360 | * | |
1361 | * FIXME: This algorithm is total crap when mixed with SMP | |
1362 | * affinity code since we now try to even up the interrupt | |
1363 | * counts when an affinity binding is keeping them on a | |
1364 | * particular CPU */ |
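| /* (Worked example, illustrative: with 4 extended CPUs and |
| * vic_intr_total == 400, a CPU that fielded 120 interrupts gets |
| * weight ((120*4 - 400) >> 4) + 4 == 9, clamped to 7 -- highest |
| * priority, so least likely to receive the next dynamically routed |
| * interrupt; one that fielded 80 gets ((80*4 - 400) >> 4) + 4 == -1, |
| * clamped to 0 -- most likely to receive it.) */ |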
1365 | weight = (vic_intr_count[cpu]*voyager_extended_cpus | |
1366 | - vic_intr_total) >> 4; | |
1367 | weight += 4; | |
1368 | if(weight > 7) | |
1369 | weight = 7; | |
1370 | if(weight < 0) | |
1371 | weight = 0; | |
1372 | ||
1373 | outb((__u8)weight, VIC_PRIORITY_REGISTER); | |
1374 | ||
1375 | #ifdef VOYAGER_DEBUG | |
1376 | if((vic_tick[cpu] & 0xFFF) == 0) { | |
1377 | /* print this message roughly every 25 secs */ | |
1378 | printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n", | |
1379 | cpu, vic_tick[cpu], weight); | |
1380 | } | |
1381 | #endif | |
1382 | } | |
1383 | ||
1384 | /* setup the profiling timer */ | |
1385 | int | |
1386 | setup_profiling_timer(unsigned int multiplier) | |
1387 | { | |
1388 | int i; | |
1389 | ||
1390 | if (!multiplier) | |
1391 | return -EINVAL; | |
1392 | ||
1393 | /* | |
1394 | * Set the new multiplier for each CPU. CPUs don't start using the | |
1395 | * new values until the next timer interrupt in which they do process | |
1396 | * accounting. | |
1397 | */ | |
1398 | for (i = 0; i < NR_CPUS; ++i) | |
1399 | per_cpu(prof_multiplier, i) = multiplier; | |
1400 | ||
1401 | return 0; | |
1402 | } | |
1403 | ||
c771746e JB |
1404 | /* This is a bit of a mess, but forced on us by the genirq changes; | |
1405 | * there's no genirq handler that really does what voyager wants, | |
1406 | * so hack it up with the simple IRQ handler */ | |
1407 | static void fastcall | |
1408 | handle_vic_irq(unsigned int irq, struct irq_desc *desc) | |
1409 | { | |
1410 | before_handle_vic_irq(irq); | |
1411 | handle_simple_irq(irq, desc); | |
1412 | after_handle_vic_irq(irq); | |
1413 | } | |
1414 | ||
1da177e4 LT |
1415 | |
1416 | /* The CPIs are handled in the per cpu 8259s, so they must be | |
1417 | * enabled to be received: FIX: enabling the CPIs in the early | |
1418 | * boot sequence interferes with bug checking; enable them later | |
1419 | * on in smp_init */ | |
1420 | #define VIC_SET_GATE(cpi, vector) \ | |
1421 | set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector)) | |
1422 | #define QIC_SET_GATE(cpi, vector) \ | |
1423 | set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector)) | |
1424 | ||
1425 | void __init | |
1426 | smp_intr_init(void) | |
1427 | { | |
1428 | int i; | |
1429 | ||
1430 | /* initialize the per cpu irq mask to all disabled */ | |
1431 | for(i = 0; i < NR_CPUS; i++) | |
1432 | vic_irq_mask[i] = 0xFFFF; | |
1433 | ||
1434 | VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt); | |
1435 | ||
1436 | VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt); | |
1437 | VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt); | |
1438 | ||
1439 | QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt); | |
1440 | QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt); | |
1441 | QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt); | |
1442 | QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt); | |
1443 | QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt); | |
1444 | ||
1445 | ||
1446 | /* now put the VIC descriptor into the first 48 IRQs | |
1447 | * | |
1448 | * This is for later: first 16 correspond to PC IRQs; next 16 | |
1449 | * are Primary MC IRQs and final 16 are Secondary MC IRQs */ | |
1450 | for(i = 0; i < 48; i++) | |
c771746e | 1451 | set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq); |
1da177e4 LT |
1452 | } |
1453 | ||
1454 | /* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per | |
1455 | * processor to receive CPI) */ |
1456 | static void | |
1457 | send_CPI(__u32 cpuset, __u8 cpi) | |
1458 | { | |
1459 | int cpu; | |
1460 | __u32 quad_cpuset = (cpuset & voyager_quad_processors); | |
1461 | ||
1462 | if(cpi < VIC_START_FAKE_CPI) { | |
1463 | /* fake CPIs are only used for booting, so send to the | |
1464 | * extended quads as well---Quads must be VIC booted */ | |
1465 | outb((__u8)(cpuset), VIC_CPI_Registers[cpi]); | |
1466 | return; | |
1467 | } | |
1468 | if(quad_cpuset) | |
1469 | send_QIC_CPI(quad_cpuset, cpi); | |
1470 | cpuset &= ~quad_cpuset; | |
1471 | cpuset &= 0xff; /* only first 8 CPUs valid for VIC CPI */ | |
1472 | if(cpuset == 0) | |
1473 | return; | |
1474 | for_each_online_cpu(cpu) { | |
1475 | if(cpuset & (1<<cpu)) | |
1476 | set_bit(cpi, &vic_cpi_mailbox[cpu]); | |
1477 | } | |
1478 | if(cpuset) | |
1479 | outb((__u8)cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]); | |
1480 | } | |
1481 | ||
1482 | /* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and | |
1483 | * set the cache line to shared by reading it. | |
1484 | * | |
1485 | * DON'T make this inline, otherwise the cache line read will be | |
1486 | * optimised away | |
1487 | * */ | |
1488 | static int | |
1489 | ack_QIC_CPI(__u8 cpi) { | |
1490 | __u8 cpu = hard_smp_processor_id(); | |
1491 | ||
1492 | cpi &= 7; | |
1493 | ||
1494 | outb(1<<cpi, QIC_INTERRUPT_CLEAR1); | |
1495 | return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi; | |
1496 | } | |
1497 | ||
1498 | static void | |
1499 | ack_special_QIC_CPI(__u8 cpi) | |
1500 | { | |
1501 | switch(cpi) { | |
1502 | case VIC_CMN_INT: | |
1503 | outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0); | |
1504 | break; | |
1505 | case VIC_SYS_INT: | |
1506 | outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0); | |
1507 | break; | |
1508 | } | |
1509 | /* also clear at the VIC, just in case (nop for non-extended proc) */ | |
1510 | ack_VIC_CPI(cpi); | |
1511 | } | |
1512 | ||
1513 | /* Acknowledge receipt of CPI in the VIC (essentially an EOI) */ | |
1514 | static void | |
1515 | ack_VIC_CPI(__u8 cpi) | |
1516 | { | |
1517 | #ifdef VOYAGER_DEBUG | |
1518 | unsigned long flags; | |
1519 | __u16 isr; | |
1520 | __u8 cpu = smp_processor_id(); | |
1521 | ||
1522 | local_irq_save(flags); | |
1523 | isr = vic_read_isr(); | |
1524 | if((isr & (1<<(cpi &7))) == 0) { | |
1525 | printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi); | |
1526 | } | |
1527 | #endif | |
1528 | /* send specific EOI; the two system interrupts have | |
1529 | * bit 4 set for a separate vector but behave as the | |
1530 | * corresponding 3 bit intr */ | |
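| /* (Note: 0x60 | level is the standard 8259 OCW2 "specific EOI" |
| * command, written here to the master command port 0x20.) */ |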
1531 | outb_p(0x60|(cpi & 7),0x20); | |
1532 | ||
1533 | #ifdef VOYAGER_DEBUG | |
1534 | if((vic_read_isr() & (1<<(cpi &7))) != 0) { | |
1535 | printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi); | |
1536 | } | |
1537 | local_irq_restore(flags); | |
1538 | #endif | |
1539 | } | |
1540 | ||
1541 | /* cribbed with thanks from irq.c */ | |
1542 | #define __byte(x,y) (((unsigned char *)&(y))[x]) | |
1543 | #define cached_21(cpu) (__byte(0,vic_irq_mask[cpu])) | |
1544 | #define cached_A1(cpu) (__byte(1,vic_irq_mask[cpu])) | |
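| /* (Note: the low byte of vic_irq_mask shadows the master PIC's mask |
| * register at port 0x21 and the high byte the slave's at 0xA1, which |
| * is why the enable/disable routines below choose the port from |
| * irq & 8.) */ |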
1545 | ||
1546 | static unsigned int | |
1547 | startup_vic_irq(unsigned int irq) | |
1548 | { | |
c771746e | 1549 | unmask_vic_irq(irq); |
1da177e4 LT |1550 | ||
1551 | return 0; | |
1552 | } | |
1553 | ||
1554 | /* The enable and disable routines. This is where we run into | |
1555 | * conflicting architectural philosophy. Fundamentally, the voyager | |
1556 | * architecture does not expect to have to disable interrupts globally | |
1557 | * (the IRQ controllers belong to each CPU). The processor masquerade | |
1558 | * which is used to start the system shouldn't be used in a running OS | |
1559 | * since it will cause great confusion if two separate CPUs drive | |
1560 | * the same IRQ controller (I know, I've tried it). | |
1561 | * | |
1562 | * The solution is a variant on the NCR lazy SPL design: | |
1563 | * | |
1564 | * 1) To disable an interrupt, do nothing (other than set the | |
1565 | * IRQ_DISABLED flag). This dares the interrupt actually to arrive. | |
1566 | * | |
1567 | * 2) If the interrupt dares to come in, raise the local mask against | |
1568 | * it (this will result in all the CPU masks being raised | |
1569 | * eventually). | |
1570 | * | |
1571 | * 3) To enable the interrupt, lower the mask on the local CPU and | |
1572 | * broadcast an Interrupt enable CPI which causes all other CPUs to | |
1573 | * adjust their masks accordingly. */ | |
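/* Mapping the three steps above onto the code that follows: step 1 is
 * mask_vic_irq() (a deliberate nop beyond the core setting IRQ_DISABLED),
 * step 2 is the lazy disable in before_handle_vic_irq(), and step 3 is
 * unmask_vic_irq(), which lowers the local mask and broadcasts
 * VIC_ENABLE_IRQ_CPI through send_CPI(). */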
1574 | ||
1575 | static void | |
c771746e | 1576 | unmask_vic_irq(unsigned int irq) |
1da177e4 LT |1577 | { | |
1578 | /* linux doesn't do processor-irq affinity, so enable on | |
1579 | * all CPUs we know about */ | |
1580 | int cpu = smp_processor_id(), real_cpu; | |
1581 | __u16 mask = (1<<irq); | |
1582 | __u32 processorList = 0; | |
1583 | unsigned long flags; | |
1584 | ||
c771746e | 1585 | VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n", |
1da177e4 LT |1586 | irq, cpu, cpu_irq_affinity[cpu])); | |
1587 | spin_lock_irqsave(&vic_irq_lock, flags); | |
1588 | for_each_online_cpu(real_cpu) { | |
1589 | if(!(voyager_extended_vic_processors & (1<<real_cpu))) | |
1590 | continue; | |
1591 | if(!(cpu_irq_affinity[real_cpu] & mask)) { | |
1592 | /* irq has no affinity for this CPU, ignore */ | |
1593 | continue; | |
1594 | } | |
1595 | if(real_cpu == cpu) { | |
1596 | enable_local_vic_irq(irq); | |
1597 | } | |
1598 | else if(vic_irq_mask[real_cpu] & mask) { | |
1599 | vic_irq_enable_mask[real_cpu] |= mask; | |
1600 | processorList |= (1<<real_cpu); | |
1601 | } | |
1602 | } | |
1603 | spin_unlock_irqrestore(&vic_irq_lock, flags); | |
1604 | if(processorList) | |
1605 | send_CPI(processorList, VIC_ENABLE_IRQ_CPI); | |
1606 | } | |
1607 | ||
1608 | static void | |
c771746e | 1609 | mask_vic_irq(unsigned int irq) |
1da177e4 LT |1610 | { | |
1611 | /* lazy disable, do nothing */ | |
1612 | } | |
1613 | ||
1614 | static void | |
1615 | enable_local_vic_irq(unsigned int irq) | |
1616 | { | |
1617 | __u8 cpu = smp_processor_id(); | |
1618 | __u16 mask = ~(1 << irq); | |
1619 | __u16 old_mask = vic_irq_mask[cpu]; | |
1620 | ||
1621 | vic_irq_mask[cpu] &= mask; | |
1622 | if(vic_irq_mask[cpu] == old_mask) | |
1623 | return; | |
1624 | ||
1625 | VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n", | |
1626 | irq, cpu)); | |
1627 | ||
1628 | if (irq & 8) { | |
1629 | outb_p(cached_A1(cpu),0xA1); | |
1630 | (void)inb_p(0xA1); | |
1631 | } | |
1632 | else { | |
1633 | outb_p(cached_21(cpu),0x21); | |
1634 | (void)inb_p(0x21); | |
1635 | } | |
1636 | } | |
1637 | ||
1638 | static void | |
1639 | disable_local_vic_irq(unsigned int irq) | |
1640 | { | |
1641 | __u8 cpu = smp_processor_id(); | |
1642 | __u16 mask = (1 << irq); | |
1643 | __u16 old_mask = vic_irq_mask[cpu]; | |
1644 | ||
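	/* never mask IRQ7 locally; vic_enable_cpi() opens it to carry
	 * the system and common interrupts (sys int/cmn int) */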
1645 | if(irq == 7) | |
1646 | return; | |
1647 | ||
1648 | vic_irq_mask[cpu] |= mask; | |
1649 | if(old_mask == vic_irq_mask[cpu]) | |
1650 | return; | |
1651 | ||
1652 | VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n", | |
1653 | irq, cpu)); | |
1654 | ||
1655 | if (irq & 8) { | |
1656 | outb_p(cached_A1(cpu),0xA1); | |
1657 | (void)inb_p(0xA1); | |
1658 | } | |
1659 | else { | |
1660 | outb_p(cached_21(cpu),0x21); | |
1661 | (void)inb_p(0x21); | |
1662 | } | |
1663 | } | |
1664 | ||
1665 | /* The VIC is level triggered, so the ack can only be issued after the | |
1666 | * interrupt completes. However, we do Voyager lazy interrupt | |
1667 | * handling here: It is an extremely expensive operation to mask an | |
1668 | * interrupt in the vic, so we merely set a flag (IRQ_DISABLED). If | |
1669 | * this interrupt actually comes in, then we mask and ack here to push | |
1670 | * the interrupt off to another CPU */ | |
1671 | static void | |
1672 | before_handle_vic_irq(unsigned int irq) | |
1673 | { | |
1674 | irq_desc_t *desc = irq_desc + irq; | |
1675 | __u8 cpu = smp_processor_id(); | |
1676 | ||
1677 | _raw_spin_lock(&vic_irq_lock); | |
1678 | vic_intr_total++; | |
1679 | vic_intr_count[cpu]++; | |
1680 | ||
1681 | if(!(cpu_irq_affinity[cpu] & (1<<irq))) { | |
1682 | /* The irq is not in our affinity mask, push it off | |
1683 | * onto another CPU */ | |
1684 | VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d on cpu %d\n", | |
1685 | irq, cpu)); | |
1686 | disable_local_vic_irq(irq); | |
1687 | /* set IRQ_INPROGRESS to prevent the handler in irq.c from | |
1688 | * actually calling the interrupt routine */ | |
1689 | desc->status |= IRQ_REPLAY | IRQ_INPROGRESS; | |
1690 | } else if(desc->status & IRQ_DISABLED) { | |
1691 | /* Damn, the interrupt actually arrived, do the lazy | |
1692 | * disable thing. The interrupt routine in irq.c will | |
1693 | * not handle an IRQ_DISABLED interrupt, so nothing more | |
1694 | * need be done here */ | |
1695 | VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n", | |
1696 | irq, cpu)); | |
1697 | disable_local_vic_irq(irq); | |
1698 | desc->status |= IRQ_REPLAY; | |
1699 | } else { | |
1700 | desc->status &= ~IRQ_REPLAY; | |
1701 | } | |
1702 | ||
1703 | _raw_spin_unlock(&vic_irq_lock); | |
1704 | } | |
1705 | ||
1706 | /* Finish the VIC interrupt: basically mask */ | |
1707 | static void | |
1708 | after_handle_vic_irq(unsigned int irq) | |
1709 | { | |
1710 | irq_desc_t *desc = irq_desc + irq; | |
1711 | ||
1712 | _raw_spin_lock(&vic_irq_lock); | |
1713 | { | |
1714 | unsigned int status = desc->status & ~IRQ_INPROGRESS; | |
1715 | #ifdef VOYAGER_DEBUG | |
1716 | __u16 isr; | |
1717 | #endif | |
1718 | ||
1719 | desc->status = status; | |
1720 | if ((status & IRQ_DISABLED)) | |
1721 | disable_local_vic_irq(irq); | |
1722 | #ifdef VOYAGER_DEBUG | |
1723 | /* DEBUG: before we ack, check what's in progress */ | |
1724 | isr = vic_read_isr(); | |
1725 | if(!(isr & (1<<irq)) && !(status & IRQ_REPLAY)) { | |
1726 | int i; | |
1727 | __u8 cpu = smp_processor_id(); | |
1728 | __u8 real_cpu; | |
1730 | ||
1731 | printk("VOYAGER SMP: CPU%d lost interrupt %d\n", | |
1732 | cpu, irq); | |
c8912599 | 1733 | for_each_possible_cpu(real_cpu) { |
1da177e4 LT |1734 | ||
1735 | outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu, | |
1736 | VIC_PROCESSOR_ID); | |
1737 | isr = vic_read_isr(); | |
1738 | if(isr & (1<<irq)) { | |
1739 | printk("VOYAGER SMP: CPU%d ack irq %d\n", | |
1740 | real_cpu, irq); | |
1741 | ack_vic_irq(irq); | |
1742 | } | |
1743 | outb(cpu, VIC_PROCESSOR_ID); | |
1744 | } | |
1745 | } | |
1746 | #endif /* VOYAGER_DEBUG */ | |
1747 | /* as soon as we ack, the interrupt is eligible for | |
1748 | * receipt by another CPU so everything must be in | |
1749 | * order here */ | |
1750 | ack_vic_irq(irq); | |
1751 | if(status & IRQ_REPLAY) { | |
1752 | /* replay is set if we disable the interrupt | |
1753 | * in the before_handle_vic_irq() routine, so | |
1754 | * clear the in progress bit here to allow the | |
1755 | * next CPU to handle this correctly */ | |
1756 | desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS); | |
1757 | } | |
1758 | #ifdef VOYAGER_DEBUG | |
1759 | isr = vic_read_isr(); | |
1760 | if((isr & (1<<irq)) != 0) | |
1761 | printk("VOYAGER SMP: after_handle_vic_irq() after ack irq=%d, isr=0x%x\n", | |
1762 | irq, isr); | |
1763 | #endif /* VOYAGER_DEBUG */ | |
1764 | } | |
1765 | _raw_spin_unlock(&vic_irq_lock); | |
1766 | ||
1767 | /* All code after this point is out of the main path - the IRQ | |
1768 | * may be intercepted by another CPU if reasserted */ | |
1769 | } | |
1770 | ||
1771 | ||
1772 | /* Linux processor - interrupt affinity manipulations. | |
1773 | * | |
1774 | * For each processor, we maintain a 32 bit irq affinity mask. | |
1775 | * Initially it is set to all 1's so every processor accepts every | |
1776 | * interrupt. In this call, we change the processor's affinity mask: | |
1777 | * | |
1778 | * Change from enable to disable: | |
1779 | * | |
1780 | * If the interrupt ever comes in to the processor, we will disable it | |
1781 | * and ack it to push it off to another CPU, so just accept the mask here. | |
1782 | * | |
1783 | * Change from disable to enable: | |
1784 | * | |
1785 | * change the mask and then do an interrupt enable CPI to re-enable on | |
1786 | * the selected processors */ | |
1787 | ||
1788 | void | |
1789 | set_vic_irq_affinity(unsigned int irq, cpumask_t mask) | |
1790 | { | |
1791 | /* Only extended processors handle interrupts */ | |
1792 | unsigned long real_mask; | |
1793 | unsigned long irq_mask = 1 << irq; | |
1794 | int cpu; | |
1795 | ||
1796 | real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors; | |
1797 | ||
1798 | if(cpus_addr(mask)[0] == 0) | |
1799 | /* must have at least one CPU to accept the interrupt -- extremely | |
1800 | * bad things will happen */ | |
1801 | return; | |
1802 | ||
1803 | if(irq == 0) | |
1804 | /* can't change the affinity of the timer IRQ. This | |
1805 | * is due to the constraint in the voyager | |
1806 | * architecture that the CPI also comes in on an IRQ | |
1807 | * line and we have chosen IRQ0 for this. If you | |
1808 | * raise the mask on this interrupt, the processor | |
1809 | * will no longer be able to accept VIC CPIs */ | |
1810 | return; | |
1811 | ||
1812 | if(irq >= 32) | |
1813 | /* You can only have 32 interrupts in a voyager system | |
1814 | * (and 32 only if you have a secondary microchannel | |
1815 | * bus) */ | |
1816 | return; | |
1817 | ||
1818 | for_each_online_cpu(cpu) { | |
1819 | unsigned long cpu_mask = 1 << cpu; | |
1820 | ||
1821 | if(cpu_mask & real_mask) { | |
1822 | /* enable the interrupt for this cpu */ | |
1823 | cpu_irq_affinity[cpu] |= irq_mask; | |
1824 | } else { | |
1825 | /* disable the interrupt for this cpu */ | |
1826 | cpu_irq_affinity[cpu] &= ~irq_mask; | |
1827 | } | |
1828 | } | |
1829 | /* this is magic, we now have the correct affinity maps, so | |
1830 | * enable the interrupt. This will send an enable CPI to | |
1831 | * those CPUs that need to enable it in their local masks, | |
1832 | * causing them to correct for the new affinity. If the | |
1833 | * interrupt is currently globally disabled, it will simply be | |
1834 | * disabled again as it comes in (voyager lazy disable). If | |
1835 | * the affinity map is tightened to disable the interrupt on a | |
1836 | * cpu, it will be pushed off when it comes in */ | |
c771746e | 1837 | unmask_vic_irq(irq); |
1da177e4 LT |1838 | } | |
1839 | ||
1840 | static void | |
1841 | ack_vic_irq(unsigned int irq) | |
1842 | { | |
1843 | if (irq & 8) { | |
1844 | outb(0x62,0x20); /* Specific EOI to cascade */ | |
1845 | outb(0x60|(irq & 7),0xA0); | |
1846 | } else { | |
1847 | outb(0x60 | (irq & 7),0x20); | |
1848 | } | |
1849 | } | |
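/* The constants in ack_vic_irq() are the standard 8259A specific EOI:
 * OCW2 0x60 | level, written to the command port.  For IRQs 8-15 the
 * slave (0xA0) is EOIed for (irq & 7) and the master (0x20) gets 0x62,
 * the specific EOI for the cascade input on IRQ2. */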
1850 | ||
1851 | /* enable the CPIs. In the VIC, the CPIs are delivered by the 8259 | |
1852 | * but are not vectored by it. This means that the 8259 mask must be | |
1853 | * lowered to receive them */ | |
1854 | static __init void | |
1855 | vic_enable_cpi(void) | |
1856 | { | |
1857 | __u8 cpu = smp_processor_id(); | |
1858 | ||
1859 | /* just take a copy of the current mask (nop for boot cpu) */ | |
1860 | vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id]; | |
1861 | ||
1862 | enable_local_vic_irq(VIC_CPI_LEVEL0); | |
1863 | enable_local_vic_irq(VIC_CPI_LEVEL1); | |
1864 | /* for sys int and cmn int */ | |
1865 | enable_local_vic_irq(7); | |
1866 | ||
1867 | if(is_cpu_quad()) { | |
1868 | outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0); | |
1869 | outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1); | |
1870 | VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n", | |
1871 | cpu, QIC_CPI_ENABLE)); | |
1872 | } | |
1873 | ||
1874 | VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n", | |
1875 | cpu, vic_irq_mask[cpu])); | |
1876 | } | |
1877 | ||
1878 | void | |
1879 | voyager_smp_dump(void) | |
1880 | { | |
1881 | int old_cpu = smp_processor_id(), cpu; | |
1882 | ||
1883 | /* dump the interrupt masks of each processor */ | |
1884 | for_each_online_cpu(cpu) { | |
1885 | __u16 imr, isr, irr; | |
1886 | unsigned long flags; | |
1887 | ||
1888 | local_irq_save(flags); | |
1889 | outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID); | |
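		/* with the masquerade in place we read the target CPU's
		 * 8259 pair: IMR straight from the mask ports, then IRR
		 * via OCW3 command 0x0a and ISR via OCW3 command 0x0b
		 * (standard 8259 read-register commands) */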
1890 | imr = (inb(0xa1) << 8) | inb(0x21); | |
1891 | outb(0x0a, 0xa0); | |
1892 | irr = inb(0xa0) << 8; | |
1893 | outb(0x0a, 0x20); | |
1894 | irr |= inb(0x20); | |
1895 | outb(0x0b, 0xa0); | |
1896 | isr = inb(0xa0) << 8; | |
1897 | outb(0x0b, 0x20); | |
1898 | isr |= inb(0x20); | |
1899 | outb(old_cpu, VIC_PROCESSOR_ID); | |
1900 | local_irq_restore(flags); | |
1901 | printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n", | |
1902 | cpu, vic_irq_mask[cpu], imr, irr, isr); | |
1903 | #if 0 | |
1904 | /* These lines are put in to try to unstick an un-ACKed irq */ | |
1905 | if(isr != 0) { | |
1906 | int irq; | |
1907 | for(irq=0; irq<16; irq++) { | |
1908 | if(isr & (1<<irq)) { | |
1909 | printk("\tCPU%d: ack irq %d\n", | |
1910 | cpu, irq); | |
1911 | local_irq_save(flags); | |
1912 | outb(VIC_CPU_MASQUERADE_ENABLE | cpu, | |
1913 | VIC_PROCESSOR_ID); | |
1914 | ack_vic_irq(irq); | |
1915 | outb(old_cpu, VIC_PROCESSOR_ID); | |
1916 | local_irq_restore(flags); | |
1917 | } | |
1918 | } | |
1919 | } | |
1920 | #endif | |
1921 | } | |
1922 | } | |
1923 | ||
1924 | void | |
1925 | smp_voyager_power_off(void *dummy) | |
1926 | { | |
1927 | if(smp_processor_id() == boot_cpu_id) | |
1928 | voyager_power_off(); | |
1929 | else | |
1930 | smp_stop_cpu_function(NULL); | |
1931 | } | |
1932 | ||
1933 | void __init | |
1934 | smp_prepare_cpus(unsigned int max_cpus) | |
1935 | { | |
1936 | /* FIXME: ignore max_cpus for now */ | |
1937 | smp_boot_cpus(); | |
1938 | } | |
1939 | ||
1940 | void __devinit smp_prepare_boot_cpu(void) | |
1941 | { | |
1942 | cpu_set(smp_processor_id(), cpu_online_map); | |
1943 | cpu_set(smp_processor_id(), cpu_callout_map); | |
4ad8d383 | 1944 | cpu_set(smp_processor_id(), cpu_possible_map); |
3c101cf0 | 1945 | cpu_set(smp_processor_id(), cpu_present_map); |
1da177e4 LT |1946 | } | |
1947 | ||
1948 | int __devinit | |
1949 | __cpu_up(unsigned int cpu) | |
1950 | { | |
1951 | /* This only works at boot for x86. See "rewrite" above. */ | |
1952 | if (cpu_isset(cpu, smp_commenced_mask)) | |
1953 | return -ENOSYS; | |
1954 | ||
1955 | /* In case one didn't come up */ | |
1956 | if (!cpu_isset(cpu, cpu_callin_map)) | |
1957 | return -EIO; | |
1958 | /* Unleash the CPU! */ | |
1959 | cpu_set(cpu, smp_commenced_mask); | |
1960 | while (!cpu_isset(cpu, cpu_online_map)) | |
1961 | mb(); | |
1962 | return 0; | |
1963 | } | |
1964 | ||
1965 | void __init | |
1966 | smp_cpus_done(unsigned int max_cpus) | |
1967 | { | |
1968 | zap_low_mappings(); | |
1969 | } | |
033ab7f8 AM |1970 | ||
1971 | void __init | |
1972 | smp_setup_processor_id(void) | |
1973 | { | |
1974 | current_thread_info()->cpu = hard_smp_processor_id(); | |
62111195 | 1975 | write_pda(cpu_number, hard_smp_processor_id()); |
033ab7f8 | 1976 | } |