x86: don't set IO APIC features if IO APIC is not enabled
arch/x86/kernel/mpparse_32.c
/*
 * Intel Multiprocessor Specification 1.1 and 1.4
 * compliant MP-table parsing routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * Fixes
 *	Erich Boleyn		: MP v1.4 and additional changes.
 *	Alan Cox		: Added EBDA scanning
 *	Ingo Molnar		: various cleanups and rewrites
 *	Maciej W. Rozycki	: Bits for default MP configurations
 *	Paul Diefenbaugh	: Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>

#include <asm/smp.h>
#include <asm/acpi.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/io_apic.h>
#include <asm/bios_ebda.h>

#include <mach_apic.h>
#include <mach_apicdef.h>
#include <mach_mpparse.h>

/* Have we found an MP table */
int smp_found_config;

/*
 * Various Linux-internal data structures created from the
 * MP-table.
 */
#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type [MAX_MP_BUSSES];
#endif
DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
static int mp_current_pci_id;

/* I/O APIC entries */
struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];

/* MP IRQ source entries */
struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

int nr_ioapics;

int pic_mode;

/* Make it easy to share the UP and SMP code: */
#ifndef CONFIG_X86_SMP
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
#ifndef CONFIG_X86_LOCAL_APIC
unsigned int boot_cpu_physical_apicid = -1U;
#endif
#endif

/*
 * Intel MP BIOS table parsing routines:
 */


/*
 * Checksum an MP configuration block.
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}

#ifdef CONFIG_X86_NUMAQ
/*
 * Have to match translation table entries to main table entries by counter
 * hence the mpc_record variable .... can't see a less disgusting way of
 * doing this ....
 */

static int mpc_record;
static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinitdata;
#endif

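/*
 * Register one processor entry from the MP table: count disabled CPUs,
 * remember which APIC ID belongs to the boot CPU, and hand the APIC
 * ID/version to the generic CPU bookkeeping code.
 */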
static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
{
	int apicid;

	if (!(m->mpc_cpuflag & CPU_ENABLED)) {
#ifdef CONFIG_X86_SMP
		disabled_cpus++;
#endif
		return;
	}

#ifdef CONFIG_X86_NUMAQ
	apicid = mpc_apic_id(m, translation_table[mpc_record]);
#else
	Dprintk("Processor #%d %u:%u APIC version %d\n",
		m->mpc_apicid,
		(m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
		(m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
		m->mpc_apicver);
	apicid = m->mpc_apicid;
#endif

	if (m->mpc_featureflag&(1<<0))
		Dprintk(" Floating point unit present.\n");
	if (m->mpc_featureflag&(1<<7))
		Dprintk(" Machine Check Exception supported.\n");
	if (m->mpc_featureflag&(1<<8))
		Dprintk(" 64 bit compare & exchange supported.\n");
	if (m->mpc_featureflag&(1<<9))
		Dprintk(" Internal APIC present.\n");
	if (m->mpc_featureflag&(1<<11))
		Dprintk(" SEP present.\n");
	if (m->mpc_featureflag&(1<<12))
		Dprintk(" MTRR present.\n");
	if (m->mpc_featureflag&(1<<13))
		Dprintk(" PGE present.\n");
	if (m->mpc_featureflag&(1<<14))
		Dprintk(" MCA present.\n");
	if (m->mpc_featureflag&(1<<15))
		Dprintk(" CMOV present.\n");
	if (m->mpc_featureflag&(1<<16))
		Dprintk(" PAT present.\n");
	if (m->mpc_featureflag&(1<<17))
		Dprintk(" PSE-36 present.\n");
	if (m->mpc_featureflag&(1<<18))
		Dprintk(" PSN present.\n");
	if (m->mpc_featureflag&(1<<19))
		Dprintk(" Cache Line Flush Instruction present.\n");
	/* 20 Reserved */
	if (m->mpc_featureflag&(1<<21))
		Dprintk(" Debug Trace and EMON Store present.\n");
	if (m->mpc_featureflag&(1<<22))
		Dprintk(" ACPI Thermal Throttle Registers present.\n");
	if (m->mpc_featureflag&(1<<23))
		Dprintk(" MMX present.\n");
	if (m->mpc_featureflag&(1<<24))
		Dprintk(" FXSR present.\n");
	if (m->mpc_featureflag&(1<<25))
		Dprintk(" XMM present.\n");
	if (m->mpc_featureflag&(1<<26))
		Dprintk(" Willamette New Instructions present.\n");
	if (m->mpc_featureflag&(1<<27))
		Dprintk(" Self Snoop present.\n");
	if (m->mpc_featureflag&(1<<28))
		Dprintk(" HT present.\n");
	if (m->mpc_featureflag&(1<<29))
		Dprintk(" Thermal Monitor present.\n");
	/* 30, 31 Reserved */


	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
		Dprintk(" Bootup CPU\n");
		boot_cpu_physical_apicid = m->mpc_apicid;
	}

	generic_processor_info(apicid, m->mpc_apicver);
}

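/*
 * Record one bus entry from the MP table: classify it as PCI or
 * non-PCI (and, on EISA/MCA kernels, remember the exact bus type) so
 * that later IRQ routing can tell the bus types apart.
 */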
static void __init MP_bus_info (struct mpc_config_bus *m)
{
	char str[7];

	memcpy(str, m->mpc_bustype, 6);
	str[6] = 0;

#ifdef CONFIG_X86_NUMAQ
	mpc_oem_bus_info(m, str, translation_table[mpc_record]);
#else
	Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
#endif

#if MAX_MP_BUSSES < 256
	if (m->mpc_busid >= MAX_MP_BUSSES) {
		printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
			" is too large, max. supported is %d\n",
			m->mpc_busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	set_bit(m->mpc_busid, mp_bus_not_pci);
	if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
#ifdef CONFIG_X86_NUMAQ
		mpc_oem_pci_bus(m, translation_table[mpc_record]);
#endif
		clear_bit(m->mpc_busid, mp_bus_not_pci);
		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
		mp_current_pci_id++;
#if defined(CONFIG_EISA) || defined (CONFIG_MCA)
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
	} else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
	} else {
		printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
#endif
	}
}

#ifdef CONFIG_X86_IO_APIC

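/*
 * Sanity-check an I/O APIC entry: panic if we would exceed
 * MAX_IO_APICS, and reject entries that report a bogus (zero) address.
 */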
static int bad_ioapic(unsigned long address)
{
	if (nr_ioapics >= MAX_IO_APICS) {
		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
			"(found %d)\n", MAX_IO_APICS, nr_ioapics);
		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
	}
	if (!address) {
		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
			" found in table, skipping!\n");
		return 1;
	}
	return 0;
}

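/*
 * Record one I/O APIC entry from the MP table, ignoring entries that
 * are marked unusable or fail the bad_ioapic() checks.
 */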
static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
{
	if (!(m->mpc_flags & MPC_APIC_USABLE))
		return;

	printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
		m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);

	if (bad_ioapic(m->mpc_apicaddr))
		return;

	mp_ioapics[nr_ioapics] = *m;
	nr_ioapics++;
}

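/*
 * Record one interrupt source entry from the MP table in mp_irqs[];
 * panic if the table holds more than MAX_IRQ_SOURCES of them.
 */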
static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
{
	mp_irqs [mp_irq_entries] = *m;
	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
		m->mpc_irqtype, m->mpc_irqflag & 3,
		(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
		m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!!\n");
}

#endif

static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
{
	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->mpc_irqtype, m->mpc_irqflag & 3,
		(m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
		m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
}

#ifdef CONFIG_X86_NUMAQ
static void __init MP_translation_info (struct mpc_config_translation *m)
{
	printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);

	if (mpc_record >= MAX_MPC_ENTRY)
		printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
	else
		translation_table[mpc_record] = m; /* stash this for later */
	if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
		node_set_online(m->trans_quad);
}

/*
 * Read/parse the MPC oem tables
 */

static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
	unsigned short oemsize)
{
	int count = sizeof (*oemtable); /* the header size */
	unsigned char *oemptr = ((unsigned char *)oemtable)+count;

	mpc_record = 0;
	printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
	if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
	{
		printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
			oemtable->oem_signature[0],
			oemtable->oem_signature[1],
			oemtable->oem_signature[2],
			oemtable->oem_signature[3]);
		return;
	}
	if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
	{
		printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
		return;
	}
	while (count < oemtable->oem_length) {
		switch (*oemptr) {
		case MP_TRANSLATION:
		{
			struct mpc_config_translation *m=
				(struct mpc_config_translation *)oemptr;
			MP_translation_info(m);
			oemptr += sizeof(*m);
			count += sizeof(*m);
			++mpc_record;
			break;
		}
		default:
		{
			printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
			return;
		}
		}
	}
}

static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
		char *productid)
{
	if (strncmp(oem, "IBM NUMA", 8))
		printk("Warning! May not be a NUMA-Q system!\n");
	if (mpc->mpc_oemptr)
		smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
				mpc->mpc_oemsize);
}
#endif	/* CONFIG_X86_NUMAQ */

/*
 * Read/parse the MPC
 */

static int __init smp_read_mpc(struct mp_config_table *mpc)
{
	char str[16];
	char oem[10];
	int count=sizeof(*mpc);
	unsigned char *mpt=((unsigned char *)mpc)+count;

	if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
		printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
			*(u32 *)mpc->mpc_signature);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
		printk(KERN_ERR "SMP mptable: checksum error!\n");
		return 0;
	}
	if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
		printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
			mpc->mpc_spec);
		return 0;
	}
	if (!mpc->mpc_lapic) {
		printk(KERN_ERR "SMP mptable: null local APIC address!\n");
		return 0;
	}
	memcpy(oem,mpc->mpc_oem,8);
	oem[8]=0;
	printk(KERN_INFO "OEM ID: %s ",oem);

	memcpy(str,mpc->mpc_productid,12);
	str[12]=0;
	printk("Product ID: %s ",str);

	mps_oem_check(mpc, oem, str);

	printk("APIC at: 0x%X\n", mpc->mpc_lapic);

	/*
	 * Save the local APIC address (it might be non-default) -- but only
	 * if we're not using ACPI.
	 */
	if (!acpi_lapic)
		mp_lapic_addr = mpc->mpc_lapic;

	/*
	 * Now process the configuration blocks.
	 */
#ifdef CONFIG_X86_NUMAQ
	mpc_record = 0;
#endif
	while (count < mpc->mpc_length) {
		switch(*mpt) {
		case MP_PROCESSOR:
		{
			struct mpc_config_processor *m=
				(struct mpc_config_processor *)mpt;
			/* ACPI may have already provided this data */
			if (!acpi_lapic)
				MP_processor_info(m);
			mpt += sizeof(*m);
			count += sizeof(*m);
			break;
		}
		case MP_BUS:
		{
			struct mpc_config_bus *m=
				(struct mpc_config_bus *)mpt;
			MP_bus_info(m);
			mpt += sizeof(*m);
			count += sizeof(*m);
			break;
		}
		case MP_IOAPIC:
		{
#ifdef CONFIG_X86_IO_APIC
			struct mpc_config_ioapic *m=
				(struct mpc_config_ioapic *)mpt;
			MP_ioapic_info(m);
			mpt+=sizeof(*m);
			count+=sizeof(*m);
#endif
			break;
		}
		case MP_INTSRC:
		{
#ifdef CONFIG_X86_IO_APIC
			struct mpc_config_intsrc *m=
				(struct mpc_config_intsrc *)mpt;

			MP_intsrc_info(m);
			mpt+=sizeof(*m);
			count+=sizeof(*m);
#endif
			break;
		}
		case MP_LINTSRC:
		{
			struct mpc_config_lintsrc *m=
				(struct mpc_config_lintsrc *)mpt;
			MP_lintsrc_info(m);
			mpt+=sizeof(*m);
			count+=sizeof(*m);
			break;
		}
		default:
		{
			count = mpc->mpc_length;
			break;
		}
		}
#ifdef CONFIG_X86_NUMAQ
		++mpc_record;
#endif
	}
	setup_apic_routing();
	if (!num_processors)
		printk(KERN_ERR "SMP mptable: no processors registered!\n");
	return num_processors;
}

#ifdef CONFIG_X86_IO_APIC

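/*
 * Read the edge/level control registers (ELCR) of the 8259A PICs at
 * I/O ports 0x4d0/0x4d1: returns 1 if the given ISA IRQ is configured
 * as level triggered, 0 if it is edge triggered.
 */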
static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = 0x4d0 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}

static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_config_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.mpc_type = MP_INTSRC;
	intsrc.mpc_irqflag = 0;			/* conforming */
	intsrc.mpc_srcbus = 0;
	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;

	intsrc.mpc_irqtype = mp_INT;

	/*
	 * If true, we have an ISA/PCI system with no IRQ entries
	 * in the MP table. To prevent the PCI interrupts from being set up
	 * incorrectly, we try to use the ELCR. The sanity check to see if
	 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 * never be level sensitive, so we simply see if the ELCR agrees.
	 * If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
			printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
		else {
			printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			/* fall through */
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 * If the ELCR indicates a level-sensitive interrupt, we
			 * copy that information over to the MP table in the
			 * irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i))
				intsrc.mpc_irqflag = 13;
			else
				intsrc.mpc_irqflag = 0;
		}

		intsrc.mpc_srcbusirq = i;
		intsrc.mpc_dstirq = i ? i : 2;		/* IRQ0 to INTIN2 */
		MP_intsrc_info(&intsrc);
	}

	intsrc.mpc_irqtype = mp_ExtINT;
	intsrc.mpc_srcbusirq = 0;
	intsrc.mpc_dstirq = 0;			/* 8259A to INTIN0 */
	MP_intsrc_info(&intsrc);
}

#endif

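/*
 * Build an in-memory MP configuration for one of the default system
 * configurations defined by the MP specification (used when no MP
 * config table is present): two CPUs, one or two buses, one I/O APIC
 * and the standard local interrupt (ExtINT/NMI) wiring.
 */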
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_config_processor processor;
	struct mpc_config_bus bus;
#ifdef CONFIG_X86_IO_APIC
	struct mpc_config_ioapic ioapic;
#endif
	struct mpc_config_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * local APIC has default address
	 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.mpc_type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.mpc_cpuflag = CPU_ENABLED;
	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
				   (boot_cpu_data.x86_model << 4) |
				   boot_cpu_data.x86_mask;
	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
	processor.mpc_reserved[0] = 0;
	processor.mpc_reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.mpc_apicid = i;
		MP_processor_info(&processor);
	}

	bus.mpc_type = MP_BUS;
	bus.mpc_busid = 0;
	switch (mpc_default_type) {
	default:
		printk("???\n");
		printk(KERN_ERR "Unknown standard configuration %d\n",
			mpc_default_type);
		/* fall through */
	case 1:
	case 5:
		memcpy(bus.mpc_bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.mpc_bustype, "EISA  ", 6);
		break;
	case 4:
	case 7:
		memcpy(bus.mpc_bustype, "MCA   ", 6);
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.mpc_busid = 1;
		memcpy(bus.mpc_bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

#ifdef CONFIG_X86_IO_APIC
	ioapic.mpc_type = MP_IOAPIC;
	ioapic.mpc_apicid = 2;
	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.mpc_flags = MPC_APIC_USABLE;
	ioapic.mpc_apicaddr = 0xFEC00000;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);
#endif
	lintsrc.mpc_type = MP_LINTSRC;
	lintsrc.mpc_irqflag = 0;		/* conforming */
	lintsrc.mpc_srcbusid = 0;
	lintsrc.mpc_srcbusirq = 0;
	lintsrc.mpc_destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.mpc_irqtype = linttypes[i];
		lintsrc.mpc_destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

static struct intel_mp_floating *mpf_found;

/*
 * Construct the Linux MP configuration from the MP table (or the
 * spec-defined default configuration) that find_smp_config() located
 * earlier, unless ACPI already provided everything we need.
 */
void __init get_smp_config (void)
{
	struct intel_mp_floating *mpf = mpf_found;

	/*
	 * ACPI supports both logical (e.g. Hyper-Threading) and physical
	 * processors, where MPS only supports physical.
	 */
	if (acpi_lapic && acpi_ioapic) {
		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
		return;
	}
	else if (acpi_lapic)
		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");

	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
	if (mpf->mpf_feature2 & (1<<7)) {
		printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		printk(KERN_INFO " Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}

	/*
	 * Now see if we need to read further.
	 */
	if (mpf->mpf_feature1 != 0) {

		printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
		construct_default_ISA_mptable(mpf->mpf_feature1);

	} else if (mpf->mpf_physptr) {

		/*
		 * Read the physical hardware table. Anything here will
		 * override the defaults.
		 */
		if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
			smp_found_config = 0;
			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
			return;
		}

#ifdef CONFIG_X86_IO_APIC
		/*
		 * If there are no explicit MP IRQ entries, then we are
		 * broken. We set up most of the low 16 IO-APIC pins to
		 * ISA defaults and hope it will work.
		 */
		if (!mp_irq_entries) {
			struct mpc_config_bus bus;

			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

			bus.mpc_type = MP_BUS;
			bus.mpc_busid = 0;
			memcpy(bus.mpc_bustype, "ISA   ", 6);
			MP_bus_info(&bus);

			construct_default_ioirq_mptable(0);
		}
#endif
	} else
		BUG();

	printk(KERN_INFO "Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
}

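/*
 * Scan a physical memory range for the MP floating pointer structure
 * ("_MP_" signature, valid length and checksum); if found, reserve it
 * (and the MP config table it points to) in the bootmem allocator and
 * remember it in mpf_found.
 */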
static int __init smp_scan_config (unsigned long base, unsigned long length)
{
	unsigned long *bp = phys_to_virt(base);
	struct intel_mp_floating *mpf;

	printk(KERN_INFO "Scan SMP from %p for %ld bytes.\n", bp,length);
	if (sizeof(*mpf) != 16)
		printk("Error: MPF size\n");

	while (length > 0) {
		mpf = (struct intel_mp_floating *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
			(mpf->mpf_length == 1) &&
			!mpf_checksum((unsigned char *)bp, 16) &&
			((mpf->mpf_specification == 1)
				|| (mpf->mpf_specification == 4)) ) {

			smp_found_config = 1;
			printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n",
				mpf, virt_to_phys(mpf));
			reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE,
					BOOTMEM_DEFAULT);
			if (mpf->mpf_physptr) {
				/*
				 * We cannot access the MPC table to compute
				 * its size yet, as only a few megabytes from
				 * the bottom of memory are mapped at this
				 * point. The PC-9800 places its MPC table at
				 * the very end of physical memory, so blindly
				 * reserving PAGE_SIZE from mpf->mpf_physptr
				 * would trigger a BUG() in reserve_bootmem.
				 */
				unsigned long size = PAGE_SIZE;
				unsigned long end = max_low_pfn * PAGE_SIZE;
				if (mpf->mpf_physptr + size > end)
					size = end - mpf->mpf_physptr;
				reserve_bootmem(mpf->mpf_physptr, size,
						BOOTMEM_DEFAULT);
			}

			mpf_found = mpf;
			return 1;
		}
		bp += 4;
		length -= 16;
	}
	return 0;
}

void __init find_smp_config (void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0,0x400) ||
		smp_scan_config(639*0x400,0x400) ||
			smp_scan_config(0xF0000,0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA/MCA bus machine with an
	 * extended bios data area.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

/* --------------------------------------------------------------------------
                            ACPI-based MP Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

void __init mp_register_lapic_address(u64 address)
{
	mp_lapic_addr = (unsigned long) address;

	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);

	if (boot_cpu_physical_apicid == -1U)
		boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());

	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
}

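/*
 * Register one local APIC (processor) reported by the ACPI MADT:
 * ignore invalid IDs, count disabled processors, and pass enabled ones
 * on to generic_processor_info() with the APIC version read from the
 * current (boot) CPU.
 */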
void __cpuinit mp_register_lapic (int id, u8 enabled)
{
	if (MAX_APICS - id <= 0) {
		printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
			id, MAX_APICS);
		return;
	}

	if (!enabled) {
#ifdef CONFIG_X86_SMP
		++disabled_cpus;
#endif
		return;
	}

	generic_processor_info(id, GET_APIC_VERSION(apic_read(APIC_LVR)));
}

#ifdef CONFIG_X86_IO_APIC

#define MP_ISA_BUS		0
#define MP_MAX_IOAPIC_PIN	127

static struct mp_ioapic_routing {
	int apic_id;
	int gsi_base;
	int gsi_end;
	u32 pin_programmed[4];
} mp_ioapic_routing[MAX_IO_APICS];

static int mp_find_ioapic (int gsi)
{
	int i = 0;

	/* Find the IOAPIC that manages this GSI. */
	for (i = 0; i < nr_ioapics; i++) {
		if ((gsi >= mp_ioapic_routing[i].gsi_base)
			&& (gsi <= mp_ioapic_routing[i].gsi_end))
			return i;
	}

	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);

	return -1;
}

static u8 uniq_ioapic_id(u8 id)
{
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return io_apic_get_unique_id(nr_ioapics, id);
	else
		return id;
}

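/*
 * Register one I/O APIC reported by ACPI: validate the address, map it
 * through a fixmap, fill in the mp_ioapics[] entry and record its GSI
 * range in mp_ioapic_routing[] for later GSI-to-pin lookups.
 */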
void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
{
	int idx = 0;

	if (bad_ioapic(address))
		return;

	idx = nr_ioapics;

	mp_ioapics[idx].mpc_type = MP_IOAPIC;
	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
	mp_ioapics[idx].mpc_apicaddr = address;

	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
	mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id);
	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);

	/*
	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
	 */
	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
	mp_ioapic_routing[idx].gsi_base = gsi_base;
	mp_ioapic_routing[idx].gsi_end = gsi_base +
		io_apic_get_redir_entries(idx);

	printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
		"GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
		mp_ioapics[idx].mpc_apicver,
		mp_ioapics[idx].mpc_apicaddr,
		mp_ioapic_routing[idx].gsi_base,
		mp_ioapic_routing[idx].gsi_end);

	nr_ioapics++;
}

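/*
 * Apply an ACPI (MADT) interrupt source override for a legacy ISA IRQ:
 * translate the GSI to an I/O APIC pin and add a matching interrupt
 * source entry to mp_irqs[] with the requested polarity and trigger.
 */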
void __init
mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
{
	struct mpc_config_intsrc intsrc;
	int ioapic = -1;
	int pin = -1;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0)
		return;
	pin = gsi - mp_ioapic_routing[ioapic].gsi_base;

	/*
	 * TBD: This check is for faulty timer entries, where the override
	 * erroneously sets the trigger to level, resulting in a HUGE
	 * increase of timer interrupts!
	 */
	if ((bus_irq == 0) && (trigger == 3))
		trigger = 1;

	intsrc.mpc_type = MP_INTSRC;
	intsrc.mpc_irqtype = mp_INT;
	intsrc.mpc_irqflag = (trigger << 2) | polarity;
	intsrc.mpc_srcbus = MP_ISA_BUS;
	intsrc.mpc_srcbusirq = bus_irq;				/* IRQ */
	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	/* APIC ID */
	intsrc.mpc_dstirq = pin;				/* INTIN# */

	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);

	mp_irqs[mp_irq_entries] = intsrc;
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!\n");
}

int es7000_plat;

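/*
 * Set up the legacy ISA IRQs (0-15) when ACPI is used: fabricate the
 * ISA bus entry and add identity-mapped interrupt source entries for
 * every ISA IRQ that is not already covered by an override.
 */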
void __init mp_config_acpi_legacy_irqs (void)
{
	struct mpc_config_intsrc intsrc;
	int i = 0;
	int ioapic = -1;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
	/*
	 * Fabricate the legacy ISA bus (bus #31).
	 */
	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
#endif
	set_bit(MP_ISA_BUS, mp_bus_not_pci);
	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);

	/*
	 * Older generations of ES7000 have no legacy identity mappings
	 */
	if (es7000_plat == 1)
		return;

	/*
	 * Locate the IOAPIC that manages the ISA IRQs (0-15).
	 */
	ioapic = mp_find_ioapic(0);
	if (ioapic < 0)
		return;

	intsrc.mpc_type = MP_INTSRC;
	intsrc.mpc_irqflag = 0;				/* Conforming */
	intsrc.mpc_srcbus = MP_ISA_BUS;
#ifdef CONFIG_X86_IO_APIC
	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
#endif
	/*
	 * Use the default configuration for IRQs 0-15, unless it is
	 * overridden by (MADT) interrupt source override entries.
	 */
	for (i = 0; i < 16; i++) {
		int idx;

		for (idx = 0; idx < mp_irq_entries; idx++) {
			struct mpc_config_intsrc *irq = mp_irqs + idx;

			/* Do we already have a mapping for this ISA IRQ? */
			if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
				break;

			/* Do we already have a mapping for this IOAPIC pin */
			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
				(irq->mpc_dstirq == i))
				break;
		}

		if (idx != mp_irq_entries) {
			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
			continue;			/* IRQ already used */
		}

		intsrc.mpc_irqtype = mp_INT;
		intsrc.mpc_srcbusirq = i;		/* Identity mapped */
		intsrc.mpc_dstirq = i;

		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
			intsrc.mpc_dstirq);

		mp_irqs[mp_irq_entries] = intsrc;
		if (++mp_irq_entries == MAX_IRQ_SOURCES)
			panic("Max # of irq sources exceeded!\n");
	}
}

#define MAX_GSI_NUM	4096
#define IRQ_COMPRESSION_START	64

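/*
 * Program an I/O APIC pin for the given ACPI GSI and return the Linux
 * IRQ number to use for it; each pin is only programmed once, and
 * high, level-triggered GSIs are compressed into the IRQ space
 * starting at IRQ_COMPRESSION_START.
 */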
int mp_register_gsi(u32 gsi, int triggering, int polarity)
{
	int ioapic = -1;
	int ioapic_pin = 0;
	int idx, bit = 0;
	static int pci_irq = IRQ_COMPRESSION_START;
	/*
	 * Mapping between Global System Interrupts, which
	 * represent all possible interrupts, and IRQs
	 * assigned to actual devices.
	 */
	static int gsi_to_irq[MAX_GSI_NUM];

	/* Don't set up the ACPI SCI because it's already set up */
	if (acpi_gbl_FADT.sci_interrupt == gsi)
		return gsi;

	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0) {
		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
		return gsi;
	}

	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;

	if (ioapic_renumber_irq)
		gsi = ioapic_renumber_irq(ioapic, gsi);

	/*
	 * Avoid pin reprogramming. PRTs typically include entries
	 * with redundant pin->gsi mappings (but unique PCI devices);
	 * we only program the IOAPIC on the first.
	 */
	bit = ioapic_pin % 32;
	idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
	if (idx > 3) {
		printk(KERN_ERR "Invalid reference to IOAPIC pin "
			"%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
			ioapic_pin);
		return gsi;
	}
	if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
		return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
	}

	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);

	/*
	 * For GSI >= 64, use IRQ compression
	 */
	if ((gsi >= IRQ_COMPRESSION_START)
		&& (triggering == ACPI_LEVEL_SENSITIVE)) {
		/*
		 * For PCI devices assign IRQs in order, avoiding gaps
		 * due to unused I/O APIC pins.
		 */
		int irq = gsi;
		if (gsi < MAX_GSI_NUM) {
			/*
			 * Retain the VIA chipset work-around (gsi > 15), but
			 * avoid a problem where the 8254 timer (IRQ0) is setup
			 * via an override (so it's not on pin 0 of the ioapic),
			 * and at the same time, the pin 0 interrupt is a PCI
			 * type. The gsi > 15 test could cause these two pins
			 * to be shared as IRQ0, and they are not shareable.
			 * So test for this condition, and if necessary, avoid
			 * the pin collision.
			 */
			gsi = pci_irq++;
			/*
			 * Don't assign IRQ used by ACPI SCI
			 */
			if (gsi == acpi_gbl_FADT.sci_interrupt)
				gsi = pci_irq++;
			gsi_to_irq[irq] = gsi;
		} else {
			printk(KERN_ERR "GSI %u is too high\n", gsi);
			return gsi;
		}
	}

	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
		triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
		polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
	return gsi;
}

#endif /* CONFIG_X86_IO_APIC */
#endif /* CONFIG_ACPI */