x86: make amd quad core 8 socket system not be clustered_box, #2
[deliverable/linux.git] / arch / x86 / kernel / apic_64.c
1 /*
2 * Local APIC handling, local APIC timers
3 *
4 * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
5 *
6 * Fixes
7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
8 * thanks to Eric Gilmore
9 * and Rolf G. Tews
10 * for testing these extensively.
11 * Maciej W. Rozycki : Various updates and fixes.
12 * Mikael Pettersson : Power Management for UP-APIC.
13 * Pavel Machek and
14 * Mikael Pettersson : PM converted to driver model.
15 */
16
17 #include <linux/init.h>
18
19 #include <linux/mm.h>
20 #include <linux/delay.h>
21 #include <linux/bootmem.h>
22 #include <linux/interrupt.h>
23 #include <linux/mc146818rtc.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/sysdev.h>
26 #include <linux/ioport.h>
27 #include <linux/clockchips.h>
28 #include <linux/acpi_pmtmr.h>
29 #include <linux/module.h>
30
31 #include <asm/atomic.h>
32 #include <asm/smp.h>
33 #include <asm/mtrr.h>
34 #include <asm/mpspec.h>
35 #include <asm/hpet.h>
36 #include <asm/pgalloc.h>
37 #include <asm/mach_apic.h>
38 #include <asm/nmi.h>
39 #include <asm/idle.h>
40 #include <asm/proto.h>
41 #include <asm/timex.h>
42 #include <asm/apic.h>
43
/* Set via "noapictimer" on the command line: skip lapic timer setup */
int disable_apic_timer __cpuinitdata;
/* Set via "apicpmtimer": calibrate the APIC timer against the ACPI PM timer */
static int apic_calibrate_pmtmr __initdata;
/* Set via "disableapic"/"nolapic": do not use the local APIC at all */
int disable_apic;

/* Local APIC timer works in C2 */
int local_apic_timer_c2_ok;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);

/*
 * Debug level, exported for io_apic.c
 */
int apic_verbosity;

/* iomem resource entry covering the local APIC register page */
static struct resource lapic_resource = {
	.name = "Local APIC",
	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
};

/* APIC timer ticks per jiffy, computed by calibrate_APIC_clock() */
static unsigned int calibration_result;

static int lapic_next_event(unsigned long delta,
			    struct clock_event_device *evt);
static void lapic_timer_setup(enum clock_event_mode mode,
			      struct clock_event_device *evt);
static void lapic_timer_broadcast(cpumask_t mask);
static void apic_pm_activate(void);

/*
 * Clock event device template for the local APIC timer. It starts out
 * with the DUMMY feature set; setup_boot_APIC_clock() clears the flag
 * once calibration succeeds and the timer is usable as a real source.
 */
static struct clock_event_device lapic_clockevent = {
	.name		= "lapic",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
			| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
	.shift		= 32,
	.set_mode	= lapic_timer_setup,
	.set_next_event	= lapic_next_event,
	.broadcast	= lapic_timer_broadcast,
	.rating		= 100,
	.irq		= -1,
};
/* Per-CPU copy of lapic_clockevent, registered by setup_APIC_timer() */
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);

/* Physical base address of the local APIC; 0 until init_apic_mappings() */
static unsigned long apic_phys;
86 /*
87 * Get the LAPIC version
88 */
89 static inline int lapic_get_version(void)
90 {
91 return GET_APIC_VERSION(apic_read(APIC_LVR));
92 }
93
/*
 * Check, if the APIC is integrated or a separate chip
 *
 * On 64-bit the local APIC is always integrated, so this is constant.
 */
static inline int lapic_is_integrated(void)
{
	return 1;
}
101
102 /*
103 * Check, whether this is a modern or a first generation APIC
104 */
105 static int modern_apic(void)
106 {
107 /* AMD systems use old APIC versions, so check the CPU */
108 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
109 boot_cpu_data.x86 >= 0xf)
110 return 1;
111 return lapic_get_version() >= 0x14;
112 }
113
/*
 * Busy-wait (without timeout) until the ICR delivery-pending bit clears,
 * i.e. the previous IPI has been accepted.
 */
void apic_wait_icr_idle(void)
{
	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}
119
120 u32 safe_apic_wait_icr_idle(void)
121 {
122 u32 send_status;
123 int timeout;
124
125 timeout = 0;
126 do {
127 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
128 if (!send_status)
129 break;
130 udelay(100);
131 } while (timeout++ < 1000);
132
133 return send_status;
134 }
135
/**
 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
 *
 * Programs LVT0 with NMI delivery mode and leaves it unmasked.
 */
void __cpuinit enable_NMI_through_LVT0(void)
{
	unsigned int v;

	/* unmask and set to NMI */
	v = APIC_DM_NMI;
	apic_write(APIC_LVT0, v);
}
147
148 /**
149 * lapic_get_maxlvt - get the maximum number of local vector table entries
150 */
151 int lapic_get_maxlvt(void)
152 {
153 unsigned int v, maxlvt;
154
155 v = apic_read(APIC_LVR);
156 maxlvt = GET_APIC_MAXLVT(v);
157 return maxlvt;
158 }
159
/*
 * This function sets up the local APIC timer, with a timeout of
 * 'clocks' APIC bus clock. During calibration we actually call
 * this function twice on the boot CPU, once with a bogus timeout
 * value, second time for real. The other (noncalibrating) CPUs
 * call this function only once, with the real, calibrated value.
 *
 * We do reads before writes even if unnecessary, to get around the
 * P5 APIC double write bug.
 *
 * @clocks:  initial timer count (only written in periodic mode)
 * @oneshot: non-zero selects oneshot mode instead of periodic
 * @irqen:   non-zero leaves the timer interrupt unmasked
 */

static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{
	unsigned int lvtt_value, tmp_value;

	lvtt_value = LOCAL_TIMER_VECTOR;
	if (!oneshot)
		lvtt_value |= APIC_LVT_TIMER_PERIODIC;
	if (!irqen)
		lvtt_value |= APIC_LVT_MASKED;

	apic_write(APIC_LVTT, lvtt_value);

	/*
	 * Divide PICLK by 16
	 */
	tmp_value = apic_read(APIC_TDCR);
	apic_write(APIC_TDCR, (tmp_value
				& ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
				| APIC_TDR_DIV_16);

	/* Periodic mode needs the initial count; oneshot counts are
	 * programmed later via lapic_next_event(). */
	if (!oneshot)
		apic_write(APIC_TMICT, clocks);
}
194
/*
 * Setup extended LVT, AMD specific (K8, family 10h)
 *
 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
 * MCE interrupts are supported. Thus MCE offset must be set to 0.
 */

#define APIC_EILVT_LVTOFF_MCE 0
#define APIC_EILVT_LVTOFF_IBS 1

/*
 * Program one extended LVT entry. The registers live at fixed 0x10
 * strides starting at APIC_EILVT0.
 */
static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
{
	unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
	unsigned int v = (mask << 16) | (msg_type << 8) | vector;

	apic_write(reg, v);
}

/* Set up the MCE extended LVT entry; returns the offset used (0) */
u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
{
	setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
	return APIC_EILVT_LVTOFF_MCE;
}

/* Set up the IBS extended LVT entry; returns the offset used (1) */
u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
{
	setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
	return APIC_EILVT_LVTOFF_IBS;
}
224
/*
 * Program the next event, relative to now
 *
 * clockevents set_next_event callback: arm the oneshot timer by writing
 * the initial count register. Always succeeds.
 */
static int lapic_next_event(unsigned long delta,
			    struct clock_event_device *evt)
{
	apic_write(APIC_TMICT, delta);
	return 0;
}
234
/*
 * Setup the lapic timer in periodic or oneshot mode
 *
 * clockevents set_mode callback for the current CPU's lapic timer.
 * Interrupts are disabled around the register accesses.
 */
static void lapic_timer_setup(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	unsigned long flags;
	unsigned int v;

	/* Lapic used as dummy for broadcast ? */
	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
		return;

	local_irq_save(flags);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		__setup_APIC_LVTT(calibration_result,
				  mode != CLOCK_EVT_MODE_PERIODIC, 1);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* Mask the timer interrupt in its LVT entry */
		v = apic_read(APIC_LVTT);
		v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
		apic_write(APIC_LVTT, v);
		break;
	case CLOCK_EVT_MODE_RESUME:
		/* Nothing to do here */
		break;
	}

	local_irq_restore(flags);
}
269
/*
 * Local APIC timer broadcast function
 *
 * Emulates timer interrupts on the CPUs in @mask by sending them the
 * local timer vector as an IPI. No-op on UP kernels.
 */
static void lapic_timer_broadcast(cpumask_t mask)
{
#ifdef CONFIG_SMP
	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#endif
}
279
/*
 * Setup the local APIC timer for this CPU. Copy the initialized values
 * of the boot CPU and register the clock event in the framework.
 */
static void setup_APIC_timer(void)
{
	struct clock_event_device *levt = &__get_cpu_var(lapic_events);

	/* Clone the (possibly calibrated) template for this CPU */
	memcpy(levt, &lapic_clockevent, sizeof(*levt));
	levt->cpumask = cpumask_of_cpu(smp_processor_id());

	clockevents_register_device(levt);
}
293
/*
 * In this function we calibrate APIC bus clocks to the external
 * timer. Unfortunately we cannot use jiffies and the timer irq
 * to calibrate, since some later bootup code depends on getting
 * the first irq? Ugh.
 *
 * We want to do the calibration only once since we
 * want to have local timer irqs syncron. CPUs connected
 * by the same APIC bus have the very same bus frequency.
 * And we want to have irqs off anyways, no accidental
 * APIC irq that way.
 */

#define TICK_COUNT 100000000

static void __init calibrate_APIC_clock(void)
{
	unsigned apic, apic_start;
	unsigned long tsc, tsc_start;
	int result;

	local_irq_disable();

	/*
	 * Put whatever arbitrary (but long enough) timeout
	 * value into the APIC clock, we just want to get the
	 * counter running for calibration.
	 *
	 * No interrupt enable !
	 */
	__setup_APIC_LVTT(250000000, 0, 0);

	apic_start = apic_read(APIC_TMCCT);
#ifdef CONFIG_X86_PM_TIMER
	/* Calibrate against the ACPI PM timer if requested on the
	 * command line ("apicpmtimer") and the port is known. */
	if (apic_calibrate_pmtmr && pmtmr_ioport) {
		pmtimer_wait(5000);  /* 5ms wait */
		apic = apic_read(APIC_TMCCT);
		result = (apic_start - apic) * 1000L / 5;
	} else
#endif
	{
		/* Otherwise calibrate against the TSC: spin until either
		 * counter has advanced by TICK_COUNT, then scale. */
		rdtscll(tsc_start);

		do {
			apic = apic_read(APIC_TMCCT);
			rdtscll(tsc);
		} while ((tsc - tsc_start) < TICK_COUNT &&
				(apic_start - apic) < TICK_COUNT);

		result = (apic_start - apic) * 1000L * tsc_khz /
					(tsc - tsc_start);
	}

	local_irq_enable();

	printk(KERN_DEBUG "APIC timer calibration result %d\n", result);

	printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
		result / 1000 / 1000, result / 1000 % 1000);

	/* Calculate the scaled math multiplication factor */
	lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32);
	lapic_clockevent.max_delta_ns =
		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
	lapic_clockevent.min_delta_ns =
		clockevent_delta2ns(0xF, &lapic_clockevent);

	/* Timer ticks per jiffy, used when programming periodic mode */
	calibration_result = result / HZ;
}
363
/*
 * Setup the boot APIC
 *
 * Calibrate and verify the result.
 */
void __init setup_boot_APIC_clock(void)
{
	/*
	 * The local apic timer can be disabled via the kernel commandline.
	 * Register the lapic timer as a dummy clock event source on SMP
	 * systems, so the broadcast mechanism is used. On UP systems simply
	 * ignore it.
	 */
	if (disable_apic_timer) {
		printk(KERN_INFO "Disabling APIC timer\n");
		/* No broadcast on UP ! */
		if (num_possible_cpus() > 1) {
			lapic_clockevent.mult = 1;
			setup_APIC_timer();
		}
		return;
	}

	printk(KERN_INFO "Using local APIC timer interrupts.\n");
	calibrate_APIC_clock();

	/*
	 * Do a sanity check on the APIC calibration result
	 */
	if (calibration_result < (1000000 / HZ)) {
		printk(KERN_WARNING
			"APIC frequency too slow, disabling apic timer\n");
		/* No broadcast on UP ! */
		if (num_possible_cpus() > 1)
			setup_APIC_timer();
		return;
	}

	/*
	 * If nmi_watchdog is set to IO_APIC, we need the
	 * PIT/HPET going. Otherwise register lapic as a dummy
	 * device.
	 */
	if (nmi_watchdog != NMI_IO_APIC)
		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
	else
		printk(KERN_WARNING "APIC timer registered as dummy,"
			" due to nmi_watchdog=1!\n");

	setup_APIC_timer();
}
415
/*
 * AMD C1E enabled CPUs have a real nasty problem: Some BIOSes set the
 * C1E flag only in the secondary CPU, so when we detect the wreckage
 * we already have enabled the boot CPU local apic timer. Check, if
 * disable_apic_timer is set and the DUMMY flag is cleared. If yes,
 * set the DUMMY flag again and force the broadcast mode in the
 * clockevents layer.
 */
void __cpuinit check_boot_apic_timer_broadcast(void)
{
	/* Nothing to fix up unless the timer was disabled late while the
	 * boot CPU's device is already registered as a real source. */
	if (!disable_apic_timer ||
	    (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
		return;

	printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n");
	lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;

	/* clockevents_notify() requires interrupts enabled here */
	local_irq_enable();
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id);
	local_irq_disable();
}
437
/*
 * Set up the lapic timer on a secondary CPU, fixing up the boot CPU's
 * broadcast state first (see check_boot_apic_timer_broadcast()).
 */
void __cpuinit setup_secondary_APIC_clock(void)
{
	check_boot_apic_timer_broadcast();
	setup_APIC_timer();
}
443
/*
 * The guts of the apic timer interrupt
 *
 * Dispatches to the clockevent handler registered for this CPU's lapic
 * timer device; discards the interrupt as spurious if no handler is
 * installed yet.
 */
static void local_apic_timer_interrupt(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);

	/*
	 * Normally we should not be here till LAPIC has been initialized but
	 * in some cases like kdump, its possible that there is a pending LAPIC
	 * timer interrupt from previous kernel's context and is delivered in
	 * new kernel the moment interrupts are enabled.
	 *
	 * Interrupts are enabled early and LAPIC is setup much later, hence
	 * its possible that when we get here evt->event_handler is NULL.
	 * Check for event_handler being NULL and discard the interrupt as
	 * spurious.
	 */
	if (!evt->event_handler) {
		printk(KERN_WARNING
		       "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
		/* Switch it off */
		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
		return;
	}

	/*
	 * the NMI deadlock-detector uses this.
	 */
	add_pda(apic_timer_irqs, 1);

	evt->event_handler(evt);
}
478
/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 *   interrupt as well. Thus we cannot inline the local irq ... ]
 */
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	exit_idle();
	irq_enter();
	local_apic_timer_interrupt();
	irq_exit();
	set_irq_regs(old_regs);
}
507
/*
 * Changing the profiling multiplier is not supported on this
 * architecture; always reject the request.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
512
513
514 /*
515 * Local APIC start and shutdown
516 */
517
518 /**
519 * clear_local_APIC - shutdown the local APIC
520 *
521 * This is called, when a CPU is disabled and before rebooting, so the state of
522 * the local APIC has no dangling leftovers. Also used to cleanout any BIOS
523 * leftovers during boot.
524 */
525 void clear_local_APIC(void)
526 {
527 int maxlvt = lapic_get_maxlvt();
528 u32 v;
529
530 /* APIC hasn't been mapped yet */
531 if (!apic_phys)
532 return;
533
534 maxlvt = lapic_get_maxlvt();
535 /*
536 * Masking an LVT entry can trigger a local APIC error
537 * if the vector is zero. Mask LVTERR first to prevent this.
538 */
539 if (maxlvt >= 3) {
540 v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
541 apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
542 }
543 /*
544 * Careful: we have to set masks only first to deassert
545 * any level-triggered sources.
546 */
547 v = apic_read(APIC_LVTT);
548 apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
549 v = apic_read(APIC_LVT0);
550 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
551 v = apic_read(APIC_LVT1);
552 apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
553 if (maxlvt >= 4) {
554 v = apic_read(APIC_LVTPC);
555 apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
556 }
557
558 /*
559 * Clean APIC state for other OSs:
560 */
561 apic_write(APIC_LVTT, APIC_LVT_MASKED);
562 apic_write(APIC_LVT0, APIC_LVT_MASKED);
563 apic_write(APIC_LVT1, APIC_LVT_MASKED);
564 if (maxlvt >= 3)
565 apic_write(APIC_LVTERR, APIC_LVT_MASKED);
566 if (maxlvt >= 4)
567 apic_write(APIC_LVTPC, APIC_LVT_MASKED);
568 apic_write(APIC_ESR, 0);
569 apic_read(APIC_ESR);
570 }
571
/**
 * disable_local_APIC - clear and disable the local APIC
 *
 * Masks all LVT entries via clear_local_APIC() and then clears the
 * software-enable bit in the spurious interrupt vector register.
 */
void disable_local_APIC(void)
{
	unsigned int value;

	clear_local_APIC();

	/*
	 * Disable APIC (implies clearing of registers
	 * for 82489DX!).
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_SPIV_APIC_ENABLED;
	apic_write(APIC_SPIV, value);
}
589
/*
 * Disable the local APIC with interrupts disabled; no-op if the CPU
 * has no APIC.
 */
void lapic_shutdown(void)
{
	unsigned long flags;

	if (!cpu_has_apic)
		return;

	local_irq_save(flags);

	disable_local_APIC();

	local_irq_restore(flags);
}
603
/*
 * This is to verify that we're looking at a real local APIC.
 * Check these against your board if the CPUs aren't getting
 * started for no apparent reason.
 *
 * Returns 1 if the APIC looks genuine, 0 otherwise.
 */
int __init verify_local_APIC(void)
{
	unsigned int reg0, reg1;

	/*
	 * The version register is read-only in a real APIC.
	 */
	reg0 = apic_read(APIC_LVR);
	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
	apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
	reg1 = apic_read(APIC_LVR);
	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);

	/*
	 * The two version reads above should print the same
	 * numbers. If the second one is different, then we
	 * poke at a non-APIC.
	 */
	if (reg1 != reg0)
		return 0;

	/*
	 * Check if the version looks reasonably.
	 */
	reg1 = GET_APIC_VERSION(reg0);
	if (reg1 == 0x00 || reg1 == 0xff)
		return 0;
	reg1 = lapic_get_maxlvt();
	if (reg1 < 0x02 || reg1 == 0xff)
		return 0;

	/*
	 * The ID register is read/write in a real APIC.
	 */
	reg0 = apic_read(APIC_ID);
	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
	/* Flip the ID bits, verify the flip sticks, then restore */
	apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
	reg1 = apic_read(APIC_ID);
	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
	apic_write(APIC_ID, reg0);
	if (reg1 != (reg0 ^ APIC_ID_MASK))
		return 0;

	/*
	 * The next two are just to see if we have sane values.
	 * They're only really relevant if we're in Virtual Wire
	 * compatibility mode, but most boxes are anymore.
	 */
	reg0 = apic_read(APIC_LVT0);
	apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
	reg1 = apic_read(APIC_LVT1);
	apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);

	return 1;
}
664
/**
 * sync_Arb_IDs - synchronize APIC bus arbitration IDs
 *
 * Only needed on old (pre-version-0x14, non-AMD) APICs; modern parts
 * do not use APIC bus arbitration.
 */
void __init sync_Arb_IDs(void)
{
	/* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 */
	if (modern_apic())
		return;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
	apic_write(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
				| APIC_DM_INIT);
}
683
/*
 * An initial setup of the virtual wire mode.
 *
 * Puts the boot CPU's APIC into virtual wire mode (LINT0 = ExtINT,
 * LINT1 = NMI) when there is no SMP configuration to take over later.
 */
void __init init_bsp_APIC(void)
{
	unsigned int value;

	/*
	 * Don't do the setup now if we have a SMP BIOS as the
	 * through-I/O-APIC virtual wire mode might be active.
	 */
	if (smp_found_config || !cpu_has_apic)
		return;

	value = apic_read(APIC_LVR);

	/*
	 * Do not trust the local APIC being empty at bootup.
	 */
	clear_local_APIC();

	/*
	 * Enable APIC.
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	value |= APIC_SPIV_APIC_ENABLED;
	value |= APIC_SPIV_FOCUS_DISABLED;
	value |= SPURIOUS_APIC_VECTOR;
	apic_write(APIC_SPIV, value);

	/*
	 * Set up the virtual wire mode.
	 */
	apic_write(APIC_LVT0, APIC_DM_EXTINT);
	value = APIC_DM_NMI;
	apic_write(APIC_LVT1, value);
}
722
/**
 * setup_local_APIC - setup the local APIC
 *
 * Full per-CPU local APIC bring-up: LDR/TPR init, clearing of stale
 * ISR bits, software enable, and LINT0/LINT1 programming. The register
 * write order below is deliberate; do not reorder.
 */
void __cpuinit setup_local_APIC(void)
{
	unsigned int value;
	int i, j;

	value = apic_read(APIC_LVR);

	/* The spurious vector's low nibble must be all-ones by hw spec */
	BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);

	/*
	 * Double-check whether this APIC is really registered.
	 * This is meaningless in clustered apic mode, so we skip it.
	 */
	if (!apic_id_registered())
		BUG();

	/*
	 * Intel recommends to set DFR, LDR and TPR before enabling
	 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
	 * document number 292116).  So here it goes...
	 */
	init_apic_ldr();

	/*
	 * Set Task Priority to 'accept all'. We never change this
	 * later on.
	 */
	value = apic_read(APIC_TASKPRI);
	value &= ~APIC_TPRI_MASK;
	apic_write(APIC_TASKPRI, value);

	/*
	 * After a crash, we no longer service the interrupts and a pending
	 * interrupt from previous kernel might still have ISR bit set.
	 *
	 * Most probably by now CPU has serviced that pending interrupt and
	 * it might not have done the ack_APIC_irq() because it thought,
	 * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
	 * does not clear the ISR bit and cpu thinks it has already serivced
	 * the interrupt. Hence a vector might get locked. It was noticed
	 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
	 */
	for (i = APIC_ISR_NR - 1; i >= 0; i--) {
		value = apic_read(APIC_ISR + i*0x10);
		for (j = 31; j >= 0; j--) {
			if (value & (1<<j))
				ack_APIC_irq();
		}
	}

	/*
	 * Now that we are all set up, enable the APIC
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	/*
	 * Enable APIC
	 */
	value |= APIC_SPIV_APIC_ENABLED;

	/* We always use processor focus */

	/*
	 * Set spurious IRQ vector
	 */
	value |= SPURIOUS_APIC_VECTOR;
	apic_write(APIC_SPIV, value);

	/*
	 * Set up LVT0, LVT1:
	 *
	 * set up through-local-APIC on the BP's LINT0. This is not
	 * strictly necessary in pure symmetric-IO mode, but sometimes
	 * we delegate interrupts to the 8259A.
	 */
	/*
	 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
	 */
	value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
	if (!smp_processor_id() && !value) {
		/* BSP with LINT0 not already masked: pass ExtINT through */
		value = APIC_DM_EXTINT;
		apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
			    smp_processor_id());
	} else {
		value = APIC_DM_EXTINT | APIC_LVT_MASKED;
		apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
			    smp_processor_id());
	}
	apic_write(APIC_LVT0, value);

	/*
	 * only the BP should see the LINT1 NMI signal, obviously.
	 */
	if (!smp_processor_id())
		value = APIC_DM_NMI;
	else
		value = APIC_DM_NMI | APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
}
825
/*
 * Program the error LVT entry and flush any stale error status.
 */
void __cpuinit lapic_setup_esr(void)
{
	unsigned maxlvt = lapic_get_maxlvt();

	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR);
	/*
	 * spec says clear errors after enabling vector.
	 */
	if (maxlvt > 3)
		apic_write(APIC_ESR, 0);
}
837
/*
 * Finish local APIC setup: ESR, NMI watchdog and PM activation.
 */
void __cpuinit end_local_APIC_setup(void)
{
	lapic_setup_esr();
	nmi_watchdog_default();
	setup_apic_nmi_watchdog(NULL);
	apic_pm_activate();
}
845
/*
 * Detect and enable local APICs on non-SMP boards.
 * Original code written by Keir Fraser.
 * On AMD64 we trust the BIOS - if it says no APIC it is likely
 * not correctly set up (usually the APIC timer won't work etc.)
 *
 * Returns 0 on success (mp_lapic_addr/boot_cpu_id set), -1 otherwise.
 */
static int __init detect_init_APIC(void)
{
	if (!cpu_has_apic) {
		printk(KERN_INFO "No local APIC present\n");
		return -1;
	}

	/* Assume the architectural default base address */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
	boot_cpu_id = 0;
	return 0;
}
863
/**
 * init_apic_mappings - initialize APIC mappings
 *
 * Establishes the fixmap mapping of the local APIC register page and
 * records it in the iomem resource tree.
 */
void __init init_apic_mappings(void)
{
	/*
	 * If no local APIC can be found then set up a fake all
	 * zeroes page to simulate the local APIC and another
	 * one for the IO-APIC.
	 */
	if (!smp_found_config && detect_init_APIC()) {
		apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
		apic_phys = __pa(apic_phys);
	} else
		apic_phys = mp_lapic_addr;

	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
				APIC_BASE, apic_phys);

	/* Put local APIC into the resource map. */
	lapic_resource.start = apic_phys;
	lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
	insert_resource(&iomem_resource, &lapic_resource);

	/*
	 * Fetch the APIC ID of the BSP in case we have a
	 * default configuration (or the MP table is broken).
	 */
	boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
}
895
/*
 * This initializes the IO-APIC and APIC hardware if this is
 * a UP kernel.
 *
 * Returns 0 on success, -1 if the APIC is disabled or absent.
 */
int __init APIC_init_uniprocessor(void)
{
	if (disable_apic) {
		printk(KERN_INFO "Apic disabled\n");
		return -1;
	}
	if (!cpu_has_apic) {
		disable_apic = 1;
		printk(KERN_INFO "Apic disabled by BIOS\n");
		return -1;
	}

	verify_local_APIC();

	/* Only the boot CPU is present on a UP system */
	phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
	apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id));

	setup_local_APIC();

	/*
	 * Now enable IO-APICs, actually call clear_IO_APIC
	 * We need clear_IO_APIC before enabling vector on BP
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		enable_IO_APIC();

	end_local_APIC_setup();

	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;
	setup_boot_APIC_clock();
	check_nmi_watchdog();
	return 0;
}
936
937 /*
938 * Local APIC interrupts
939 */
940
/*
 * This interrupt should _never_ happen with our APIC/SMP architecture
 */
asmlinkage void smp_spurious_interrupt(void)
{
	unsigned int v;
	exit_idle();
	irq_enter();
	/*
	 * Check if this really is a spurious interrupt and ACK it
	 * if it is a vectored one.  Just in case...
	 * Spurious interrupts should not be ACKed.
	 */
	/* ISR register for this vector: (vector / 32) * 0x10 offset */
	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
		ack_APIC_irq();

	add_pda(irq_spurious_count, 1);
	irq_exit();
}
961
/*
 * This interrupt should never happen with our APIC/SMP architecture
 */
asmlinkage void smp_error_interrupt(void)
{
	unsigned int v, v1;

	exit_idle();
	irq_enter();
	/* First tickle the hardware, only then report what went on. -- REW */
	v = apic_read(APIC_ESR);
	/* Writing 0 latches the current error status into ESR */
	apic_write(APIC_ESR, 0);
	v1 = apic_read(APIC_ESR);
	ack_APIC_irq();
	atomic_inc(&irq_err_count);

	/* Here is what the APIC error bits mean:
	   0: Send CS error
	   1: Receive CS error
	   2: Send accept error
	   3: Receive accept error
	   4: Reserved
	   5: Send illegal vector
	   6: Received illegal vector
	   7: Illegal register address
	*/
	printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
		smp_processor_id(), v , v1);
	irq_exit();
}
992
/*
 * Put the boot CPU's APIC back into virtual wire compatibility mode,
 * e.g. before handing control to another OS or kexec kernel.
 *
 * @virt_wire_setup: non-zero if virtual wire mode is already routed
 *                   through the I/O APIC, in which case LVT0 is simply
 *                   disabled instead of being set to ExtINT.
 */
void disconnect_bsp_APIC(int virt_wire_setup)
{
	/* Go back to Virtual Wire compatibility mode */
	unsigned long value;

	/* For the spurious interrupt use vector F, and enable it */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	value |= APIC_SPIV_APIC_ENABLED;
	value |= 0xf;
	apic_write(APIC_SPIV, value);

	if (!virt_wire_setup) {
		/*
		 * For LVT0 make it edge triggered, active high,
		 * external and enabled
		 */
		value = apic_read(APIC_LVT0);
		value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
		value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
		value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
		apic_write(APIC_LVT0, value);
	} else {
		/* Disable LVT0 */
		apic_write(APIC_LVT0, APIC_LVT_MASKED);
	}

	/* For LVT1 make it edge triggered, active high, nmi and enabled */
	value = apic_read(APIC_LVT1);
	value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
		APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
		APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
	value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
	value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
	apic_write(APIC_LVT1, value);
}
1031
1032 /*
1033 * Power management
1034 */
#ifdef CONFIG_PM

/* Saved APIC register state across suspend/resume */
static struct {
	/* 'active' is true if the local APIC was enabled by us and
	   not the BIOS; this signifies that we are also responsible
	   for disabling it before entering apm/acpi suspend */
	int active;
	/* r/w apic fields */
	unsigned int apic_id;
	unsigned int apic_taskpri;
	unsigned int apic_ldr;
	unsigned int apic_dfr;
	unsigned int apic_spiv;
	unsigned int apic_lvtt;
	unsigned int apic_lvtpc;
	unsigned int apic_lvt0;
	unsigned int apic_lvt1;
	unsigned int apic_lvterr;
	unsigned int apic_tmict;
	unsigned int apic_tdcr;
	unsigned int apic_thmr;	/* thermal LVT, only saved if maxlvt >= 5 */
} apic_pm_state;
1057
/*
 * Save all restorable APIC registers into apic_pm_state, then disable
 * the local APIC. Counterpart of lapic_resume(). Always returns 0.
 */
static int lapic_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return 0;

	maxlvt = lapic_get_maxlvt();

	apic_pm_state.apic_id = apic_read(APIC_ID);
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	if (maxlvt >= 4)
		apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
#ifdef CONFIG_X86_MCE_INTEL
	if (maxlvt >= 5)
		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
#endif
	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
	return 0;
}
1090
/*
 * Re-enable the local APIC via the APICBASE MSR and restore the register
 * state saved by lapic_suspend(). Always returns 0.
 */
static int lapic_resume(struct sys_device *dev)
{
	unsigned int l, h;
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return 0;

	maxlvt = lapic_get_maxlvt();

	local_irq_save(flags);
	/* Re-enable the APIC at its original physical base address */
	rdmsr(MSR_IA32_APICBASE, l, h);
	l &= ~MSR_IA32_APICBASE_BASE;
	l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
	wrmsr(MSR_IA32_APICBASE, l, h);
	/* Keep LVTERR masked while restoring the other registers */
	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
	apic_write(APIC_ID, apic_pm_state.apic_id);
	apic_write(APIC_DFR, apic_pm_state.apic_dfr);
	apic_write(APIC_LDR, apic_pm_state.apic_ldr);
	apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
#ifdef CONFIG_X86_MCE_INTEL
	if (maxlvt >= 5)
		apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
#endif
	if (maxlvt >= 4)
		apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
	/* Flush stale error state before unmasking LVTERR */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	local_irq_restore(flags);
	return 0;
}
1132
/* sysdev class hooking the lapic into the suspend/resume path */
static struct sysdev_class lapic_sysclass = {
	.name		= "lapic",
	.resume		= lapic_resume,
	.suspend	= lapic_suspend,
};

/* Single system-wide lapic sysdev instance */
static struct sys_device device_lapic = {
	.id	= 0,
	.cls	= &lapic_sysclass,
};

/* Mark the APIC as enabled-by-us, so suspend/resume will manage it */
static void __cpuinit apic_pm_activate(void)
{
	apic_pm_state.active = 1;
}

/*
 * Register the lapic sysdev so suspend/resume callbacks are invoked.
 * Returns 0 on success or when no APIC is present.
 */
static int __init init_lapic_sysfs(void)
{
	int error;

	if (!cpu_has_apic)
		return 0;
	/* XXX: remove suspend/resume procs if !apic_pm_state.active? */

	error = sysdev_class_register(&lapic_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic);
	return error;
}
device_initcall(init_lapic_sysfs);
1163
#else	/* CONFIG_PM */

/* No power management: activation is a no-op */
static void apic_pm_activate(void) { }

#endif	/* CONFIG_PM */
1169
/*
 * apic_is_clustered_box() -- Check if we can expect good TSC
 *
 * Thus far, the major user of this is IBM's Summit2 series:
 *
 * Clustered boxes may have unsynced TSC problems if they are
 * multi-chassis. Use available data to take a good guess.
 * If in doubt, go HPET.
 *
 * Returns 1 (likely clustered, TSC suspect) or 0.
 */
__cpuinit int apic_is_clustered_box(void)
{
	int i, clusters, zeros;
	unsigned id;
	u16 *bios_cpu_apicid;
	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);

	/*
	 * there is not this kind of box with AMD CPU yet.
	 * Some AMD box with quadcore cpu and 8 sockets apicid
	 * will be [4, 0x23] or [8, 0x27] could be thought to
	 * have three apic_clusters. So go out early.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return 0;

	bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

	/* Mark the APIC cluster of every known CPU in the bitmap */
	for (i = 0; i < NR_CPUS; i++) {
		/* are we being called early in kernel startup? */
		if (bios_cpu_apicid) {
			id = bios_cpu_apicid[i];
		}
		else if (i < nr_cpu_ids) {
			if (cpu_present(i))
				id = per_cpu(x86_bios_cpu_apicid, i);
			else
				continue;
		}
		else
			break;

		if (id != BAD_APICID)
			__set_bit(APIC_CLUSTERID(id), clustermap);
	}

	/* Problem:  Partially populated chassis may not have CPUs in some of
	 * the APIC clusters they have been allocated.  Only present CPUs have
	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
	 * Since clusters are allocated sequentially, count zeros only if
	 * they are bounded by ones.
	 */
	clusters = 0;
	zeros = 0;
	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
		if (test_bit(i, clustermap)) {
			clusters += 1 + zeros;
			zeros = 0;
		} else
			++zeros;
	}

	/*
	 * If clusters > 2, then should be multi-chassis.
	 * May have to revisit this when multi-core + hyperthreaded CPUs come
	 * out, but AFAIK this will work even for them.
	 */
	return (clusters > 2);
}
1239
/*
 * APIC command line parameters
 */

/*
 * "apic" / "apic=verbose" / "apic=debug": with no argument, force the
 * IO-APIC on; otherwise select the debug verbosity level.
 */
static int __init apic_set_verbosity(char *str)
{
	if (str == NULL) {
		skip_ioapic_setup = 0;
		ioapic_force = 1;
		return 0;
	}
	if (strcmp("debug", str) == 0)
		apic_verbosity = APIC_DEBUG;
	else if (strcmp("verbose", str) == 0)
		apic_verbosity = APIC_VERBOSE;
	else {
		printk(KERN_WARNING "APIC Verbosity level %s not recognised"
				" use apic=verbose or apic=debug\n", str);
		return -EINVAL;
	}

	return 0;
}
early_param("apic", apic_set_verbosity);

/* "disableapic": turn the local APIC off completely */
static __init int setup_disableapic(char *str)
{
	disable_apic = 1;
	clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
	return 0;
}
early_param("disableapic", setup_disableapic);

/* same as disableapic, for compatibility */
static __init int setup_nolapic(char *str)
{
	return setup_disableapic(str);
}
early_param("nolapic", setup_nolapic);

/* "lapic_timer_c2_ok": trust the lapic timer to keep running in C2 */
static int __init parse_lapic_timer_c2_ok(char *arg)
{
	local_apic_timer_c2_ok = 1;
	return 0;
}
early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);

/* "noapictimer": do not use the local APIC timer */
static __init int setup_noapictimer(char *str)
{
	/* reject if the option carries an unexpected argument */
	if (str[0] != ' ' && str[0] != 0)
		return 0;
	disable_apic_timer = 1;
	return 1;
}
__setup("noapictimer", setup_noapictimer);

/* "apicpmtimer": calibrate the APIC timer using the PM timer, no TSC */
static __init int setup_apicpmtimer(char *s)
{
	apic_calibrate_pmtmr = 1;
	notsc_setup(NULL);
	return 0;
}
__setup("apicpmtimer", setup_apicpmtimer);
1302
This page took 0.075375 seconds and 6 git commands to generate.