x86: move smp_found_config
deliverable/linux.git: arch/x86/kernel/apic_32.c
1 /*
2 * Local APIC handling, local APIC timers
3 *
4 * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
5 *
6 * Fixes
7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
8 * thanks to Eric Gilmore
9 * and Rolf G. Tews
10 * for testing these extensively.
11 * Maciej W. Rozycki : Various updates and fixes.
12 * Mikael Pettersson : Power Management for UP-APIC.
13 * Pavel Machek and
14 * Mikael Pettersson : PM converted to driver model.
15 */
16
17 #include <linux/init.h>
18
19 #include <linux/mm.h>
20 #include <linux/delay.h>
21 #include <linux/bootmem.h>
22 #include <linux/interrupt.h>
23 #include <linux/mc146818rtc.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/sysdev.h>
26 #include <linux/cpu.h>
27 #include <linux/clockchips.h>
28 #include <linux/acpi_pmtmr.h>
29 #include <linux/module.h>
30 #include <linux/dmi.h>
31
32 #include <asm/atomic.h>
33 #include <asm/smp.h>
34 #include <asm/mtrr.h>
35 #include <asm/mpspec.h>
36 #include <asm/desc.h>
37 #include <asm/arch_hooks.h>
38 #include <asm/hpet.h>
39 #include <asm/i8253.h>
40 #include <asm/nmi.h>
41
42 #include <mach_apic.h>
43 #include <mach_apicdef.h>
44 #include <mach_ipi.h>
45
46 /*
47 * Sanity check: the spurious IRQ vector must have its low four bits set
48 */
49 #if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
50 # error SPURIOUS_APIC_VECTOR definition error
51 #endif
52
53 unsigned long mp_lapic_addr;
54
55 DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
56 EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
57
58 /*
59 * Knob to control our willingness to enable the local APIC.
60 *
61 * -1=force-disable, +1=force-enable
62 */
63 static int enable_local_apic __initdata;
64
65 /* Local APIC timer verification ok */
66 static int local_apic_timer_verify_ok;
67 /* Disable the local APIC timer from the kernel command line, via DMI quirk,
68    or by a CPU MSR check */
69 int local_apic_timer_disabled;
70 /* Local APIC timer works in C2 */
71 int local_apic_timer_c2_ok;
72 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
73
74 /*
75 * Debug level, exported for io_apic.c
76 */
77 int apic_verbosity;
78
79 int pic_mode;
80
81 /* Have we found an MP table */
82 int smp_found_config;
83
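/* Calibrated APIC timer (bus) clocks per jiffy; computed in setup_boot_APIC_clock() */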
84 static unsigned int calibration_result;
85
86 static int lapic_next_event(unsigned long delta,
87 struct clock_event_device *evt);
88 static void lapic_timer_setup(enum clock_event_mode mode,
89 struct clock_event_device *evt);
90 static void lapic_timer_broadcast(cpumask_t mask);
91 static void apic_pm_activate(void);
92
93 /*
94 * The local apic timer can be used for any function which is CPU local.
95 */
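/*
 * The clockevent starts out with CLOCK_EVT_FEAT_DUMMY set and a low rating;
 * setup_boot_APIC_clock() clears the dummy flag once the timer has been
 * calibrated and verified (unless the NMI watchdog needs the PIT/HPET).
 */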
96 static struct clock_event_device lapic_clockevent = {
97 .name = "lapic",
98 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
99 | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
100 .shift = 32,
101 .set_mode = lapic_timer_setup,
102 .set_next_event = lapic_next_event,
103 .broadcast = lapic_timer_broadcast,
104 .rating = 100,
105 .irq = -1,
106 };
107 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
108
109 /* Local APIC was disabled by the BIOS and enabled by the kernel */
110 static int enabled_via_apicbase;
111
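/* Physical base address of the local APIC; mapped at FIX_APIC_BASE in init_apic_mappings() */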
112 static unsigned long apic_phys;
113
114 /*
115 * Get the LAPIC version
116 */
117 static inline int lapic_get_version(void)
118 {
119 return GET_APIC_VERSION(apic_read(APIC_LVR));
120 }
121
122 /*
123 * Check if the APIC is integrated or a separate chip
124 */
125 static inline int lapic_is_integrated(void)
126 {
127 return APIC_INTEGRATED(lapic_get_version());
128 }
129
130 /*
131 * Check whether this is a modern or a first-generation APIC
132 */
133 static int modern_apic(void)
134 {
135 /* AMD systems use old APIC versions, so check the CPU */
136 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
137 boot_cpu_data.x86 >= 0xf)
138 return 1;
139 return lapic_get_version() >= 0x14;
140 }
141
142 void apic_wait_icr_idle(void)
143 {
144 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
145 cpu_relax();
146 }
147
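/*
 * Like apic_wait_icr_idle(), but give up after ~100ms (1000 * 100us) and
 * return the last APIC_ICR_BUSY state; non-zero means the ICR never went idle.
 */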
148 u32 safe_apic_wait_icr_idle(void)
149 {
150 u32 send_status;
151 int timeout;
152
153 timeout = 0;
154 do {
155 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
156 if (!send_status)
157 break;
158 udelay(100);
159 } while (timeout++ < 1000);
160
161 return send_status;
162 }
163
164 /**
165 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
166 */
167 void __cpuinit enable_NMI_through_LVT0(void)
168 {
169 unsigned int v = APIC_DM_NMI;
170
171 /* Level triggered for 82489DX */
172 if (!lapic_is_integrated())
173 v |= APIC_LVT_LEVEL_TRIGGER;
174 apic_write_around(APIC_LVT0, v);
175 }
176
177 /**
178 * get_physical_broadcast - Get number of physical broadcast IDs
179 */
180 int get_physical_broadcast(void)
181 {
182 return modern_apic() ? 0xff : 0xf;
183 }
184
185 /**
186 * lapic_get_maxlvt - get the maximum number of local vector table entries
187 */
188 int lapic_get_maxlvt(void)
189 {
190 unsigned int v = apic_read(APIC_LVR);
191
192 /* 82489DXs do not report # of LVT entries. */
193 return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
194 }
195
196 /*
197 * Local APIC timer
198 */
199
200 /* Clock divisor is set to 16 */
201 #define APIC_DIVISOR 16
202
203 /*
204 * This function sets up the local APIC timer, with a timeout of
205 * 'clocks' APIC bus clocks. During calibration we actually call
206 * this function twice on the boot CPU, once with a bogus timeout
207 * value and a second time for real. The other (non-calibrating) CPUs
208 * call this function only once, with the real, calibrated value.
209 *
210 * We do reads before writes even if unnecessary, to get around the
211 * P5 APIC double write bug.
212 */
213 static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
214 {
215 unsigned int lvtt_value, tmp_value;
216
217 lvtt_value = LOCAL_TIMER_VECTOR;
218 if (!oneshot)
219 lvtt_value |= APIC_LVT_TIMER_PERIODIC;
220 if (!lapic_is_integrated())
221 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
222
223 if (!irqen)
224 lvtt_value |= APIC_LVT_MASKED;
225
226 apic_write_around(APIC_LVTT, lvtt_value);
227
228 /*
229 * Divide PICLK by 16
230 */
231 tmp_value = apic_read(APIC_TDCR);
232 apic_write_around(APIC_TDCR, (tmp_value
233 & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
234 | APIC_TDR_DIV_16);
235
236 if (!oneshot)
237 apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
238 }
239
240 /*
241 * Program the next event, relative to now
242 */
243 static int lapic_next_event(unsigned long delta,
244 struct clock_event_device *evt)
245 {
246 apic_write_around(APIC_TMICT, delta);
247 return 0;
248 }
249
250 /*
251 * Setup the lapic timer in periodic or oneshot mode
252 */
253 static void lapic_timer_setup(enum clock_event_mode mode,
254 struct clock_event_device *evt)
255 {
256 unsigned long flags;
257 unsigned int v;
258
259 /* Lapic used for broadcast ? */
260 if (!local_apic_timer_verify_ok)
261 return;
262
263 local_irq_save(flags);
264
265 switch (mode) {
266 case CLOCK_EVT_MODE_PERIODIC:
267 case CLOCK_EVT_MODE_ONESHOT:
268 __setup_APIC_LVTT(calibration_result,
269 mode != CLOCK_EVT_MODE_PERIODIC, 1);
270 break;
271 case CLOCK_EVT_MODE_UNUSED:
272 case CLOCK_EVT_MODE_SHUTDOWN:
273 v = apic_read(APIC_LVTT);
274 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
275 apic_write_around(APIC_LVTT, v);
276 break;
277 case CLOCK_EVT_MODE_RESUME:
278 /* Nothing to do here */
279 break;
280 }
281
282 local_irq_restore(flags);
283 }
284
285 /*
286 * Local APIC timer broadcast function
287 */
288 static void lapic_timer_broadcast(cpumask_t mask)
289 {
290 #ifdef CONFIG_SMP
291 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
292 #endif
293 }
294
295 /*
296 * Setup the local APIC timer for this CPU. Copy the initialized values
297 * of the boot CPU and register the clock event in the framework.
298 */
299 static void __devinit setup_APIC_timer(void)
300 {
301 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
302
303 memcpy(levt, &lapic_clockevent, sizeof(*levt));
304 levt->cpumask = cpumask_of_cpu(smp_processor_id());
305
306 clockevents_register_device(levt);
307 }
308
309 /*
310 * In this function we calibrate the APIC bus clocks to the external timer.
311 *
312 * We want to do the calibration only once, since we want the local timer
313 * irqs to be in sync. CPUs connected to the same APIC bus have the very same
314 * bus frequency.
315 *
316 * This was previously done by reading the PIT/HPET and waiting for a wrap
317 * around to find out that a tick has elapsed. I have a box where the PIT
318 * readout is broken, so it never gets out of the wait loop again. This was
319 * also reported by others.
320 *
321 * Monitoring the jiffies value is inaccurate and the clockevents
322 * infrastructure allows us to do a simple substitution of the interrupt
323 * handler.
324 *
325 * The calibration routine also uses the pm_timer when possible, as the PIT
326 * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes
327 * back to normal later in the boot process).
328 */
329
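/* Calibrate over HZ/10 ticks of the reference clock event, i.e. 100ms */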
330 #define LAPIC_CAL_LOOPS (HZ/10)
331
332 static __initdata int lapic_cal_loops = -1;
333 static __initdata long lapic_cal_t1, lapic_cal_t2;
334 static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
335 static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
336 static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
337
338 /*
339 * Temporary interrupt handler.
340 */
341 static void __init lapic_cal_handler(struct clock_event_device *dev)
342 {
343 unsigned long long tsc = 0;
344 long tapic = apic_read(APIC_TMCCT);
345 unsigned long pm = acpi_pm_read_early();
346
347 if (cpu_has_tsc)
348 rdtscll(tsc);
349
350 switch (lapic_cal_loops++) {
351 case 0:
352 lapic_cal_t1 = tapic;
353 lapic_cal_tsc1 = tsc;
354 lapic_cal_pm1 = pm;
355 lapic_cal_j1 = jiffies;
356 break;
357
358 case LAPIC_CAL_LOOPS:
359 lapic_cal_t2 = tapic;
360 lapic_cal_tsc2 = tsc;
361 if (pm < lapic_cal_pm1)
362 pm += ACPI_PM_OVRRUN;
363 lapic_cal_pm2 = pm;
364 lapic_cal_j2 = jiffies;
365 break;
366 }
367 }
368
369 /*
370 * Setup the boot APIC
371 *
372 * Calibrate and verify the result.
373 */
374 void __init setup_boot_APIC_clock(void)
375 {
376 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
377 const long pm_100ms = PMTMR_TICKS_PER_SEC/10;
378 const long pm_thresh = pm_100ms/100;
379 void (*real_handler)(struct clock_event_device *dev);
380 unsigned long deltaj;
381 long delta, deltapm;
382 int pm_referenced = 0;
383
384 /*
385 * The local apic timer can be disabled via the kernel
386 * commandline or from the CPU detection code. Register the lapic
387 * timer as a dummy clock event source on SMP systems, so the
388 * broadcast mechanism is used. On UP systems simply ignore it.
389 */
390 if (local_apic_timer_disabled) {
391 /* No broadcast on UP ! */
392 if (num_possible_cpus() > 1) {
393 lapic_clockevent.mult = 1;
394 setup_APIC_timer();
395 }
396 return;
397 }
398
399 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
400 "calibrating APIC timer ...\n");
401
402 local_irq_disable();
403
404 /* Replace the global interrupt handler */
405 real_handler = global_clock_event->event_handler;
406 global_clock_event->event_handler = lapic_cal_handler;
407
408 /*
409 * Setup the APIC counter to 1e9. There is no way the lapic
410 * can underflow in the 100ms detection time frame
411 */
412 __setup_APIC_LVTT(1000000000, 0, 0);
413
414 /* Let the interrupts run */
415 local_irq_enable();
416
417 while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
418 cpu_relax();
419
420 local_irq_disable();
421
422 /* Restore the real event handler */
423 global_clock_event->event_handler = real_handler;
424
425 /* Build delta t1-t2 as apic timer counts down */
426 delta = lapic_cal_t1 - lapic_cal_t2;
427 apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
428
429 /* Check if the PM timer is available */
430 deltapm = lapic_cal_pm2 - lapic_cal_pm1;
431 apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm);
432
433 if (deltapm) {
434 unsigned long mult;
435 u64 res;
436
437 mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
438
439 if (deltapm > (pm_100ms - pm_thresh) &&
440 deltapm < (pm_100ms + pm_thresh)) {
441 apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
442 } else {
443 res = (((u64) deltapm) * mult) >> 22;
444 do_div(res, 1000000);
445 printk(KERN_WARNING "APIC calibration not consistent "
446 "with PM Timer: %ldms instead of 100ms\n",
447 (long)res);
448 /* Correct the lapic counter value */
449 res = (((u64) delta) * pm_100ms);
450 do_div(res, deltapm);
451 printk(KERN_INFO "APIC delta adjusted to PM-Timer: "
452 "%lu (%ld)\n", (unsigned long) res, delta);
453 delta = (long) res;
454 }
455 pm_referenced = 1;
456 }
457
458 /* Calculate the scaled math multiplication factor */
459 lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
460 lapic_clockevent.shift);
461 lapic_clockevent.max_delta_ns =
462 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
463 lapic_clockevent.min_delta_ns =
464 clockevent_delta2ns(0xF, &lapic_clockevent);
465
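/*
 * delta is in divide-by-16 timer ticks accumulated over LAPIC_CAL_LOOPS
 * (HZ/10) jiffies; scale back by APIC_DIVISOR to get bus clocks per jiffy.
 */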
466 calibration_result = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
467
468 apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
469 apic_printk(APIC_VERBOSE, "..... mult: %ld\n", lapic_clockevent.mult);
470 apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
471 calibration_result);
472
473 if (cpu_has_tsc) {
474 delta = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
475 apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
476 "%ld.%04ld MHz.\n",
477 (delta / LAPIC_CAL_LOOPS) / (1000000 / HZ),
478 (delta / LAPIC_CAL_LOOPS) % (1000000 / HZ));
479 }
480
481 apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
482 "%u.%04u MHz.\n",
483 calibration_result / (1000000 / HZ),
484 calibration_result % (1000000 / HZ));
485
486 local_apic_timer_verify_ok = 1;
487
488 /*
489 * Do a sanity check on the APIC calibration result
490 */
491 if (calibration_result < (1000000 / HZ)) {
492 local_irq_enable();
493 printk(KERN_WARNING
494 "APIC frequency too slow, disabling apic timer\n");
495 /* No broadcast on UP ! */
496 if (num_possible_cpus() > 1)
497 setup_APIC_timer();
498 return;
499 }
500
501 /* We trust the PM timer based calibration; verify against jiffies only if it was not used */
502 if (!pm_referenced) {
503 apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
504
505 /*
506 * Setup the apic timer manually
507 */
508 levt->event_handler = lapic_cal_handler;
509 lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt);
510 lapic_cal_loops = -1;
511
512 /* Let the interrupts run */
513 local_irq_enable();
514
515 while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
516 cpu_relax();
517
518 local_irq_disable();
519
520 /* Stop the lapic timer */
521 lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);
522
523 local_irq_enable();
524
525 /* Jiffies delta */
526 deltaj = lapic_cal_j2 - lapic_cal_j1;
527 apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
528
529 /* Check if the jiffies result is consistent */
530 if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
531 apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
532 else
533 local_apic_timer_verify_ok = 0;
534 } else
535 local_irq_enable();
536
537 if (!local_apic_timer_verify_ok) {
538 printk(KERN_WARNING
539 "APIC timer disabled due to verification failure.\n");
540 /* No broadcast on UP ! */
541 if (num_possible_cpus() == 1)
542 return;
543 } else {
544 /*
545 * If nmi_watchdog is set to IO_APIC, we need the PIT/HPET going and
546 * leave the lapic registered as a dummy device. Otherwise clear the
547 * dummy flag and use the lapic as a real clock event device.
548 */
549 if (nmi_watchdog != NMI_IO_APIC)
550 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
551 else
552 printk(KERN_WARNING "APIC timer registered as dummy,"
553 " due to nmi_watchdog=1!\n");
554 }
555
556 /* Setup the lapic or request the broadcast */
557 setup_APIC_timer();
558 }
559
560 void __devinit setup_secondary_APIC_clock(void)
561 {
562 setup_APIC_timer();
563 }
564
565 /*
566 * The guts of the apic timer interrupt
567 */
568 static void local_apic_timer_interrupt(void)
569 {
570 int cpu = smp_processor_id();
571 struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
572
573 /*
574 * Normally we should not be here until the LAPIC has been initialized, but
575 * in some cases, like kdump, it's possible that there is a pending LAPIC
576 * timer interrupt left over from the previous kernel's context which is
577 * delivered in the new kernel the moment interrupts are enabled.
578 *
579 * Interrupts are enabled early and the LAPIC is set up much later, hence
580 * it's possible that when we get here evt->event_handler is NULL.
581 * Check for event_handler being NULL and discard the interrupt as
582 * spurious.
583 */
584 if (!evt->event_handler) {
585 printk(KERN_WARNING
586 "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
587 /* Switch it off */
588 lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
589 return;
590 }
591
592 /*
593 * the NMI deadlock-detector uses this.
594 */
595 per_cpu(irq_stat, cpu).apic_timer_irqs++;
596
597 evt->event_handler(evt);
598 }
599
600 /*
601 * Local APIC timer interrupt. This is the most natural way of doing
602 * local interrupts, but local timer interrupts can be emulated by
603 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
604 *
605 * [ if a single-CPU system runs an SMP kernel then we call the local
606 * interrupt as well. Thus we cannot inline the local irq ... ]
607 */
608 void smp_apic_timer_interrupt(struct pt_regs *regs)
609 {
610 struct pt_regs *old_regs = set_irq_regs(regs);
611
612 /*
613 * NOTE! We'd better ACK the irq immediately,
614 * because timer handling can be slow.
615 */
616 ack_APIC_irq();
617 /*
618 * update_process_times() expects us to have done irq_enter().
619 * Besides, if we don't, timer interrupts ignore the global
620 * interrupt lock, which is the WrongThing (tm) to do.
621 */
622 irq_enter();
623 local_apic_timer_interrupt();
624 irq_exit();
625
626 set_irq_regs(old_regs);
627 }
628
629 int setup_profiling_timer(unsigned int multiplier)
630 {
631 return -EINVAL;
632 }
633
634 /*
635 * Setup extended LVT, AMD specific (K8, family 10h)
636 *
637 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
638 * MCE interrupts are supported. Thus MCE offset must be set to 0.
639 */
640
641 #define APIC_EILVT_LVTOFF_MCE 0
642 #define APIC_EILVT_LVTOFF_IBS 1
643
644 static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
645 {
646 unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
647 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
648 apic_write(reg, v);
649 }
650
651 u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
652 {
653 setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
654 return APIC_EILVT_LVTOFF_MCE;
655 }
656
657 u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
658 {
659 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
660 return APIC_EILVT_LVTOFF_IBS;
661 }
662
663 /*
664 * Local APIC start and shutdown
665 */
666
667 /**
668 * clear_local_APIC - shutdown the local APIC
669 *
670 * This is called when a CPU is disabled and before rebooting, so that the
671 * state of the local APIC has no dangling leftovers. Also used to clean out
672 * any BIOS leftovers during boot.
673 */
674 void clear_local_APIC(void)
675 {
676 int maxlvt;
677 u32 v;
678
679 /* APIC hasn't been mapped yet */
680 if (!apic_phys)
681 return;
682
683 maxlvt = lapic_get_maxlvt();
684 /*
685 * Masking an LVT entry can trigger a local APIC error
686 * if the vector is zero. Mask LVTERR first to prevent this.
687 */
688 if (maxlvt >= 3) {
689 v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
690 apic_write_around(APIC_LVTERR, v | APIC_LVT_MASKED);
691 }
692 /*
693 * Careful: we have to set only the mask bits first, to deassert
694 * any level-triggered sources.
695 */
696 v = apic_read(APIC_LVTT);
697 apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
698 v = apic_read(APIC_LVT0);
699 apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
700 v = apic_read(APIC_LVT1);
701 apic_write_around(APIC_LVT1, v | APIC_LVT_MASKED);
702 if (maxlvt >= 4) {
703 v = apic_read(APIC_LVTPC);
704 apic_write_around(APIC_LVTPC, v | APIC_LVT_MASKED);
705 }
706
707 /* let's not touch this if we didn't frob it */
708 #ifdef CONFIG_X86_MCE_P4THERMAL
709 if (maxlvt >= 5) {
710 v = apic_read(APIC_LVTTHMR);
711 apic_write_around(APIC_LVTTHMR, v | APIC_LVT_MASKED);
712 }
713 #endif
714 /*
715 * Clean APIC state for other OSs:
716 */
717 apic_write_around(APIC_LVTT, APIC_LVT_MASKED);
718 apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
719 apic_write_around(APIC_LVT1, APIC_LVT_MASKED);
720 if (maxlvt >= 3)
721 apic_write_around(APIC_LVTERR, APIC_LVT_MASKED);
722 if (maxlvt >= 4)
723 apic_write_around(APIC_LVTPC, APIC_LVT_MASKED);
724
725 #ifdef CONFIG_X86_MCE_P4THERMAL
726 if (maxlvt >= 5)
727 apic_write_around(APIC_LVTTHMR, APIC_LVT_MASKED);
728 #endif
729 /* Integrated APIC (!82489DX) ? */
730 if (lapic_is_integrated()) {
731 if (maxlvt > 3)
732 /* Clear ESR due to Pentium errata 3AP and 11AP */
733 apic_write(APIC_ESR, 0);
734 apic_read(APIC_ESR);
735 }
736 }
737
738 /**
739 * disable_local_APIC - clear and disable the local APIC
740 */
741 void disable_local_APIC(void)
742 {
743 unsigned long value;
744
745 clear_local_APIC();
746
747 /*
748 * Disable APIC (implies clearing of registers
749 * for 82489DX!).
750 */
751 value = apic_read(APIC_SPIV);
752 value &= ~APIC_SPIV_APIC_ENABLED;
753 apic_write_around(APIC_SPIV, value);
754
755 /*
756 * When LAPIC was disabled by the BIOS and enabled by the kernel,
757 * restore the disabled state.
758 */
759 if (enabled_via_apicbase) {
760 unsigned int l, h;
761
762 rdmsr(MSR_IA32_APICBASE, l, h);
763 l &= ~MSR_IA32_APICBASE_ENABLE;
764 wrmsr(MSR_IA32_APICBASE, l, h);
765 }
766 }
767
768 /*
769 * If Linux enabled the LAPIC against the BIOS default, disable it again before
770 * re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and
771 * not power off. Additionally, clear all LVT entries before disable_local_APIC
772 * for the case where Linux didn't enable the LAPIC.
773 */
774 void lapic_shutdown(void)
775 {
776 unsigned long flags;
777
778 if (!cpu_has_apic)
779 return;
780
781 local_irq_save(flags);
782 clear_local_APIC();
783
784 if (enabled_via_apicbase)
785 disable_local_APIC();
786
787 local_irq_restore(flags);
788 }
789
790 /*
791 * This is to verify that we're looking at a real local APIC.
792 * Check these against your board if the CPUs aren't getting
793 * started for no apparent reason.
794 */
795 int __init verify_local_APIC(void)
796 {
797 unsigned int reg0, reg1;
798
799 /*
800 * The version register is read-only in a real APIC.
801 */
802 reg0 = apic_read(APIC_LVR);
803 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
804 apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
805 reg1 = apic_read(APIC_LVR);
806 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
807
808 /*
809 * The two version reads above should print the same
810 * numbers. If the second one is different, then we
811 * poke at a non-APIC.
812 */
813 if (reg1 != reg0)
814 return 0;
815
816 /*
817 * Check if the version looks reasonable.
818 */
819 reg1 = GET_APIC_VERSION(reg0);
820 if (reg1 == 0x00 || reg1 == 0xff)
821 return 0;
822 reg1 = lapic_get_maxlvt();
823 if (reg1 < 0x02 || reg1 == 0xff)
824 return 0;
825
826 /*
827 * The ID register is read/write in a real APIC.
828 */
829 reg0 = apic_read(APIC_ID);
830 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
831
832 /*
833 * The next two are just to see if we have sane values.
834 * They're only really relevant if we're in Virtual Wire
835 * compatibility mode, but most boxes are these days.
836 */
837 reg0 = apic_read(APIC_LVT0);
838 apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
839 reg1 = apic_read(APIC_LVT1);
840 apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
841
842 return 1;
843 }
844
845 /**
846 * sync_Arb_IDs - synchronize APIC bus arbitration IDs
847 */
848 void __init sync_Arb_IDs(void)
849 {
850 /*
851 * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1. Not
852 * needed on AMD either.
853 */
854 if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
855 return;
856 /*
857 * Wait for idle.
858 */
859 apic_wait_icr_idle();
860
861 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
862 apic_write_around(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
863 | APIC_DM_INIT);
864 }
865
866 /*
867 * An initial setup of the virtual wire mode.
868 */
869 void __init init_bsp_APIC(void)
870 {
871 unsigned long value;
872
873 /*
874 * Don't do the setup now if we have an SMP BIOS, as the
875 * through-I/O-APIC virtual wire mode might be active.
876 */
877 if (smp_found_config || !cpu_has_apic)
878 return;
879
880 /*
881 * Do not trust the local APIC being empty at bootup.
882 */
883 clear_local_APIC();
884
885 /*
886 * Enable APIC.
887 */
888 value = apic_read(APIC_SPIV);
889 value &= ~APIC_VECTOR_MASK;
890 value |= APIC_SPIV_APIC_ENABLED;
891
892 /* This bit is reserved on P4/Xeon and should be cleared */
893 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
894 (boot_cpu_data.x86 == 15))
895 value &= ~APIC_SPIV_FOCUS_DISABLED;
896 else
897 value |= APIC_SPIV_FOCUS_DISABLED;
898 value |= SPURIOUS_APIC_VECTOR;
899 apic_write_around(APIC_SPIV, value);
900
901 /*
902 * Set up the virtual wire mode.
903 */
904 apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
905 value = APIC_DM_NMI;
906 if (!lapic_is_integrated()) /* 82489DX */
907 value |= APIC_LVT_LEVEL_TRIGGER;
908 apic_write_around(APIC_LVT1, value);
909 }
910
911 static void __cpuinit lapic_setup_esr(void)
912 {
913 unsigned long oldvalue, value, maxlvt;
914 if (lapic_is_integrated() && !esr_disable) {
915 /* !82489DX */
916 maxlvt = lapic_get_maxlvt();
917 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
918 apic_write(APIC_ESR, 0);
919 oldvalue = apic_read(APIC_ESR);
920
921 /* enables sending errors */
922 value = ERROR_APIC_VECTOR;
923 apic_write_around(APIC_LVTERR, value);
924 /*
925 * spec says clear errors after enabling vector.
926 */
927 if (maxlvt > 3)
928 apic_write(APIC_ESR, 0);
929 value = apic_read(APIC_ESR);
930 if (value != oldvalue)
931 apic_printk(APIC_VERBOSE, "ESR value before enabling "
932 "vector: 0x%08lx after: 0x%08lx\n",
933 oldvalue, value);
934 } else {
935 if (esr_disable)
936 /*
937 * Something untraceable is creating bad interrupts on
938 * secondary quads ... for the moment, just leave the
939 * ESR disabled - we can't do anything useful with the
940 * errors anyway - mbligh
941 */
942 printk(KERN_INFO "Leaving ESR disabled.\n");
943 else
944 printk(KERN_INFO "No ESR for 82489DX.\n");
945 }
946 }
947
948
949 /**
950 * setup_local_APIC - setup the local APIC
951 */
952 void __cpuinit setup_local_APIC(void)
953 {
954 unsigned long value, integrated;
955 int i, j;
956
957 /* Pound the ESR really hard over the head with a big hammer - mbligh */
958 if (esr_disable) {
959 apic_write(APIC_ESR, 0);
960 apic_write(APIC_ESR, 0);
961 apic_write(APIC_ESR, 0);
962 apic_write(APIC_ESR, 0);
963 }
964
965 integrated = lapic_is_integrated();
966
967 /*
968 * Double-check whether this APIC is really registered.
969 */
970 if (!apic_id_registered())
971 BUG();
972
973 /*
974 * Intel recommends to set DFR, LDR and TPR before enabling
975 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
976 * document number 292116). So here it goes...
977 */
978 init_apic_ldr();
979
980 /*
981 * Set Task Priority to 'accept all'. We never change this
982 * later on.
983 */
984 value = apic_read(APIC_TASKPRI);
985 value &= ~APIC_TPRI_MASK;
986 apic_write_around(APIC_TASKPRI, value);
987
988 /*
989 * After a crash, we no longer service the interrupts and a pending
990 * interrupt from the previous kernel might still have its ISR bit set.
991 *
992 * Most probably by now the CPU has serviced that pending interrupt but
993 * it might not have done the ack_APIC_irq() because it thought the
994 * interrupt came from the i8259 as an ExtInt. The LAPIC did not get an
995 * EOI so it does not clear the ISR bit, and the CPU thinks it has already
996 * serviced the interrupt. Hence a vector might get locked. This was noticed
997 * for the timer irq (vector 0x31). Issue an extra EOI to clear the ISR.
998 */
999 for (i = APIC_ISR_NR - 1; i >= 0; i--) {
1000 value = apic_read(APIC_ISR + i*0x10);
1001 for (j = 31; j >= 0; j--) {
1002 if (value & (1<<j))
1003 ack_APIC_irq();
1004 }
1005 }
1006
1007 /*
1008 * Now that we are all set up, enable the APIC
1009 */
1010 value = apic_read(APIC_SPIV);
1011 value &= ~APIC_VECTOR_MASK;
1012 /*
1013 * Enable APIC
1014 */
1015 value |= APIC_SPIV_APIC_ENABLED;
1016
1017 /*
1018 * Some unknown Intel IO/APIC (or APIC) errata is biting us with
1019 * certain networking cards. If high frequency interrupts are
1020 * happening on a particular IOAPIC pin, plus the IOAPIC routing
1021 * entry is masked/unmasked at a high rate as well then sooner or
1022 * later IOAPIC line gets 'stuck', no more interrupts are received
1023 * from the device. If focus CPU is disabled then the hang goes
1024 * away, oh well :-(
1025 *
1026 * [ This bug can be reproduced easily with a level-triggered
1027 * PCI Ne2000 networking cards and PII/PIII processors, dual
1028 * BX chipset. ]
1029 */
1030 /*
1031 * Actually disabling the focus CPU check just makes the hang less
1032 * frequent, as it makes the interrupt distribution model more
1033 * like LRU than MRU (the short-term load is more even across CPUs).
1034 * See also the comment in end_level_ioapic_irq(). --macro
1035 */
1036
1037 /* Enable focus processor (bit==0) */
1038 value &= ~APIC_SPIV_FOCUS_DISABLED;
1039
1040 /*
1041 * Set spurious IRQ vector
1042 */
1043 value |= SPURIOUS_APIC_VECTOR;
1044 apic_write_around(APIC_SPIV, value);
1045
1046 /*
1047 * Set up LVT0, LVT1:
1048 *
1049 * set up through-local-APIC on the BP's LINT0. This is not
1050 * strictly necessary in pure symmetric-IO mode, but sometimes
1051 * we delegate interrupts to the 8259A.
1052 */
1053 /*
1054 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
1055 */
1056 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
1057 if (!smp_processor_id() && (pic_mode || !value)) {
1058 value = APIC_DM_EXTINT;
1059 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
1060 smp_processor_id());
1061 } else {
1062 value = APIC_DM_EXTINT | APIC_LVT_MASKED;
1063 apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
1064 smp_processor_id());
1065 }
1066 apic_write_around(APIC_LVT0, value);
1067
1068 /*
1069 * only the BP should see the LINT1 NMI signal, obviously.
1070 */
1071 if (!smp_processor_id())
1072 value = APIC_DM_NMI;
1073 else
1074 value = APIC_DM_NMI | APIC_LVT_MASKED;
1075 if (!integrated) /* 82489DX */
1076 value |= APIC_LVT_LEVEL_TRIGGER;
1077 apic_write_around(APIC_LVT1, value);
1078 }
1079
1080 void __cpuinit end_local_APIC_setup(void)
1081 {
1082 unsigned long value;
1083
1084 lapic_setup_esr();
1085 /* Disable the local apic timer */
1086 value = apic_read(APIC_LVTT);
1087 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
1088 apic_write_around(APIC_LVTT, value);
1089
1090 setup_apic_nmi_watchdog(NULL);
1091 apic_pm_activate();
1092 }
1093
1094 /*
1095 * Detect and initialize APIC
1096 */
1097 static int __init detect_init_APIC(void)
1098 {
1099 u32 h, l, features;
1100
1101 /* Disabled by kernel option? */
1102 if (enable_local_apic < 0)
1103 return -1;
1104
1105 switch (boot_cpu_data.x86_vendor) {
1106 case X86_VENDOR_AMD:
1107 if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
1108 (boot_cpu_data.x86 == 15))
1109 break;
1110 goto no_apic;
1111 case X86_VENDOR_INTEL:
1112 if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
1113 (boot_cpu_data.x86 == 5 && cpu_has_apic))
1114 break;
1115 goto no_apic;
1116 default:
1117 goto no_apic;
1118 }
1119
1120 if (!cpu_has_apic) {
1121 /*
1122 * Over-ride BIOS and try to enable the local APIC only if
1123 * "lapic" specified.
1124 */
1125 if (enable_local_apic <= 0) {
1126 printk(KERN_INFO "Local APIC disabled by BIOS -- "
1127 "you can enable it with \"lapic\"\n");
1128 return -1;
1129 }
1130 /*
1131 * Some BIOSes disable the local APIC in the APIC_BASE
1132 * MSR. This can only be done in software for Intel P6 or later
1133 * and AMD K7 (Model > 1) or later.
1134 */
1135 rdmsr(MSR_IA32_APICBASE, l, h);
1136 if (!(l & MSR_IA32_APICBASE_ENABLE)) {
1137 printk(KERN_INFO
1138 "Local APIC disabled by BIOS -- reenabling.\n");
1139 l &= ~MSR_IA32_APICBASE_BASE;
1140 l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
1141 wrmsr(MSR_IA32_APICBASE, l, h);
1142 enabled_via_apicbase = 1;
1143 }
1144 }
1145 /*
1146 * The APIC feature bit should now be enabled
1147 * in `cpuid'
1148 */
1149 features = cpuid_edx(1);
1150 if (!(features & (1 << X86_FEATURE_APIC))) {
1151 printk(KERN_WARNING "Could not enable APIC!\n");
1152 return -1;
1153 }
1154 set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1155 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1156
1157 /* The BIOS may have set up the APIC at some other address */
1158 rdmsr(MSR_IA32_APICBASE, l, h);
1159 if (l & MSR_IA32_APICBASE_ENABLE)
1160 mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
1161
1162 if (nmi_watchdog != NMI_NONE && nmi_watchdog != NMI_DISABLED)
1163 nmi_watchdog = NMI_LOCAL_APIC;
1164
1165 printk(KERN_INFO "Found and enabled local APIC!\n");
1166
1167 apic_pm_activate();
1168
1169 return 0;
1170
1171 no_apic:
1172 printk(KERN_INFO "No local APIC present or hardware disabled\n");
1173 return -1;
1174 }
1175
1176 /**
1177 * init_apic_mappings - initialize APIC mappings
1178 */
1179 void __init init_apic_mappings(void)
1180 {
1181 /*
1182 * If no local APIC can be found then set up a fake all
1183 * zeroes page to simulate the local APIC and another
1184 * one for the IO-APIC.
1185 */
1186 if (!smp_found_config && detect_init_APIC()) {
1187 apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
1188 apic_phys = __pa(apic_phys);
1189 } else
1190 apic_phys = mp_lapic_addr;
1191
1192 set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
1193 printk(KERN_DEBUG "mapped APIC to %08lx (%08lx)\n", APIC_BASE,
1194 apic_phys);
1195
1196 /*
1197 * Fetch the APIC ID of the BSP in case we have a
1198 * default configuration (or the MP table is broken).
1199 */
1200 if (boot_cpu_physical_apicid == -1U)
1201 boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
1202
1203 #ifdef CONFIG_X86_IO_APIC
1204 {
1205 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
1206 int i;
1207
1208 for (i = 0; i < nr_ioapics; i++) {
1209 if (smp_found_config) {
1210 ioapic_phys = mp_ioapics[i].mp_apicaddr;
1211 if (!ioapic_phys) {
1212 printk(KERN_ERR
1213 "WARNING: bogus zero IO-APIC "
1214 "address found in MPTABLE, "
1215 "disabling IO/APIC support!\n");
1216 smp_found_config = 0;
1217 skip_ioapic_setup = 1;
1218 goto fake_ioapic_page;
1219 }
1220 } else {
1221 fake_ioapic_page:
1222 ioapic_phys = (unsigned long)
1223 alloc_bootmem_pages(PAGE_SIZE);
1224 ioapic_phys = __pa(ioapic_phys);
1225 }
1226 set_fixmap_nocache(idx, ioapic_phys);
1227 printk(KERN_DEBUG "mapped IOAPIC to %08lx (%08lx)\n",
1228 __fix_to_virt(idx), ioapic_phys);
1229 idx++;
1230 }
1231 }
1232 #endif
1233 }
1234
1235 /*
1236 * This initializes the IO-APIC and APIC hardware if this is
1237 * a UP kernel.
1238 */
1239
1240 int apic_version[MAX_APICS];
1241
1242 int __init APIC_init_uniprocessor(void)
1243 {
1244 if (enable_local_apic < 0)
1245 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1246
1247 if (!smp_found_config && !cpu_has_apic)
1248 return -1;
1249
1250 /*
1251 * Complain if the BIOS pretends there is one.
1252 */
1253 if (!cpu_has_apic &&
1254 APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
1255 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
1256 boot_cpu_physical_apicid);
1257 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1258 return -1;
1259 }
1260
1261 verify_local_APIC();
1262
1263 connect_bsp_APIC();
1264
1265 /*
1266 * Hack: In case of kdump, after a crash, the kernel might be booting
1267 * on a cpu with a non-zero lapic id. But boot_cpu_physical_apicid
1268 * might be zero if read from the MP tables. Get it from the LAPIC.
1269 */
1270 #ifdef CONFIG_CRASH_DUMP
1271 boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
1272 #endif
1273 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
1274
1275 setup_local_APIC();
1276
1277 end_local_APIC_setup();
1278 #ifdef CONFIG_X86_IO_APIC
1279 if (smp_found_config)
1280 if (!skip_ioapic_setup && nr_ioapics)
1281 setup_IO_APIC();
1282 #endif
1283 setup_boot_clock();
1284
1285 return 0;
1286 }
1287
1288 /*
1289 * Local APIC interrupts
1290 */
1291
1292 /*
1293 * This interrupt should _never_ happen with our APIC/SMP architecture
1294 */
1295 void smp_spurious_interrupt(struct pt_regs *regs)
1296 {
1297 unsigned long v;
1298
1299 irq_enter();
1300 /*
1301 * Check if this really is a spurious interrupt and ACK it
1302 * if it is a vectored one. Just in case...
1303 * Spurious interrupts should not be ACKed.
1304 */
1305 v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
1306 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
1307 ack_APIC_irq();
1308
1309 /* see sw-dev-man vol 3, chapter 7.4.13.5 */
1310 printk(KERN_INFO "spurious APIC interrupt on CPU#%d, "
1311 "should never happen.\n", smp_processor_id());
1312 __get_cpu_var(irq_stat).irq_spurious_count++;
1313 irq_exit();
1314 }
1315
1316 /*
1317 * This interrupt should never happen with our APIC/SMP architecture
1318 */
1319 void smp_error_interrupt(struct pt_regs *regs)
1320 {
1321 unsigned long v, v1;
1322
1323 irq_enter();
1324 /* First tickle the hardware, only then report what went on. -- REW */
1325 v = apic_read(APIC_ESR);
1326 apic_write(APIC_ESR, 0);
1327 v1 = apic_read(APIC_ESR);
1328 ack_APIC_irq();
1329 atomic_inc(&irq_err_count);
1330
1331 /* Here is what the APIC error bits mean:
1332 0: Send CS error
1333 1: Receive CS error
1334 2: Send accept error
1335 3: Receive accept error
1336 4: Reserved
1337 5: Send illegal vector
1338 6: Received illegal vector
1339 7: Illegal register address
1340 */
1341 printk(KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n",
1342 smp_processor_id(), v , v1);
1343 irq_exit();
1344 }
1345
1346 #ifdef CONFIG_SMP
1347 void __init smp_intr_init(void)
1348 {
1349 /*
1350 * IRQ0 must be given a fixed assignment and initialized,
1351 * because it's used before the IO-APIC is set up.
1352 */
1353 set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
1354
1355 /*
1356 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
1357 * IPI, driven by wakeup.
1358 */
1359 set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
1360
1361 /* IPI for invalidation */
1362 set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
1363
1364 /* IPI for generic function call */
1365 set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
1366 }
1367 #endif
1368
1369 /*
1370 * Initialize APIC interrupts
1371 */
1372 void __init apic_intr_init(void)
1373 {
1374 #ifdef CONFIG_SMP
1375 smp_intr_init();
1376 #endif
1377 /* self generated IPI for local APIC timer */
1378 set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
1379
1380 /* IPI vectors for APIC spurious and error interrupts */
1381 set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
1382 set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
1383
1384 /* thermal monitor LVT interrupt */
1385 #ifdef CONFIG_X86_MCE_P4THERMAL
1386 set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
1387 #endif
1388 }
1389
1390 /**
1391 * connect_bsp_APIC - attach the APIC to the interrupt system
1392 */
1393 void __init connect_bsp_APIC(void)
1394 {
1395 if (pic_mode) {
1396 /*
1397 * Do not trust the local APIC being empty at bootup.
1398 */
1399 clear_local_APIC();
1400 /*
1401 * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's
1402 * local APIC to INT and NMI lines.
1403 */
1404 apic_printk(APIC_VERBOSE, "leaving PIC mode, "
1405 "enabling APIC mode.\n");
1406 outb(0x70, 0x22);
1407 outb(0x01, 0x23);
1408 }
1409 enable_apic_mode();
1410 }
1411
1412 /**
1413 * disconnect_bsp_APIC - detach the APIC from the interrupt system
1414 * @virt_wire_setup: indicates whether virtual wire mode is selected
1415 *
1416 * Virtual wire mode is necessary to deliver legacy interrupts even when the
1417 * APIC is disabled.
1418 */
1419 void disconnect_bsp_APIC(int virt_wire_setup)
1420 {
1421 if (pic_mode) {
1422 /*
1423 * Put the board back into PIC mode (has an effect only on
1424 * certain older boards). Note that APIC interrupts, including
1425 * IPIs, won't work beyond this point! The only exception are
1426 * INIT IPIs.
1427 */
1428 apic_printk(APIC_VERBOSE, "disabling APIC mode, "
1429 "entering PIC mode.\n");
1430 outb(0x70, 0x22);
1431 outb(0x00, 0x23);
1432 } else {
1433 /* Go back to Virtual Wire compatibility mode */
1434 unsigned long value;
1435
1436 /* For the spurious interrupt use vector F, and enable it */
1437 value = apic_read(APIC_SPIV);
1438 value &= ~APIC_VECTOR_MASK;
1439 value |= APIC_SPIV_APIC_ENABLED;
1440 value |= 0xf;
1441 apic_write_around(APIC_SPIV, value);
1442
1443 if (!virt_wire_setup) {
1444 /*
1445 * For LVT0 make it edge triggered, active high,
1446 * external and enabled
1447 */
1448 value = apic_read(APIC_LVT0);
1449 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1450 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1451 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1452 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1453 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
1454 apic_write_around(APIC_LVT0, value);
1455 } else {
1456 /* Disable LVT0 */
1457 apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
1458 }
1459
1460 /*
1461 * For LVT1 make it edge triggered, active high, nmi and
1462 * enabled
1463 */
1464 value = apic_read(APIC_LVT1);
1465 value &= ~(
1466 APIC_MODE_MASK | APIC_SEND_PENDING |
1467 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1468 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1469 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1470 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
1471 apic_write_around(APIC_LVT1, value);
1472 }
1473 }
1474
1475 unsigned int __cpuinitdata maxcpus = NR_CPUS;
1476
1477 void __cpuinit generic_processor_info(int apicid, int version)
1478 {
1479 int cpu;
1480 cpumask_t tmp_map;
1481 physid_mask_t phys_cpu;
1482
1483 /*
1484 * Validate version
1485 */
1486 if (version == 0x0) {
1487 printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
1488 "fixing up to 0x10. (tell your hw vendor)\n",
1489 apicid);
1490 version = 0x10;
1491 }
1492 apic_version[apicid] = version;
1493
1494 phys_cpu = apicid_to_cpu_present(apicid);
1495 physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
1496
1497 if (num_processors >= NR_CPUS) {
1498 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
1499 " Processor ignored.\n", NR_CPUS);
1500 return;
1501 }
1502
1503 if (num_processors >= maxcpus) {
1504 printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
1505 " Processor ignored.\n", maxcpus);
1506 return;
1507 }
1508
1509 num_processors++;
1510 cpus_complement(tmp_map, cpu_present_map);
1511 cpu = first_cpu(tmp_map);
1512
1513 if (apicid == boot_cpu_physical_apicid)
1514 /*
1515 * x86_bios_cpu_apicid is required to have processors listed
1516 * in the same order as logical cpu numbers. Hence the first
1517 * entry is the BSP, and so on.
1518 */
1519 cpu = 0;
1520
1521 /*
1522 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
1523 * but we need to work out other dependencies like SMP_SUSPEND etc.
1524 * before this can be done without some confusion.
1525 * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
1526 * - Ashok Raj <ashok.raj@intel.com>
1527 */
1528 if (num_processors > 8) {
1529 switch (boot_cpu_data.x86_vendor) {
1530 case X86_VENDOR_INTEL:
1531 if (!APIC_XAPIC(version)) {
1532 def_to_bigsmp = 0;
1533 break;
1534 }
1535 /* If P4 and above fall through */
1536 case X86_VENDOR_AMD:
1537 def_to_bigsmp = 1;
1538 }
1539 }
1540 #ifdef CONFIG_SMP
1541 /* are we being called early in kernel startup? */
1542 if (x86_cpu_to_apicid_early_ptr) {
1543 u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
1544 u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
1545
1546 cpu_to_apicid[cpu] = apicid;
1547 bios_cpu_apicid[cpu] = apicid;
1548 } else {
1549 per_cpu(x86_cpu_to_apicid, cpu) = apicid;
1550 per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
1551 }
1552 #endif
1553 cpu_set(cpu, cpu_possible_map);
1554 cpu_set(cpu, cpu_present_map);
1555 }
1556
1557 /*
1558 * Power management
1559 */
1560 #ifdef CONFIG_PM
1561
1562 static struct {
1563 int active;
1564 /* r/w apic fields */
1565 unsigned int apic_id;
1566 unsigned int apic_taskpri;
1567 unsigned int apic_ldr;
1568 unsigned int apic_dfr;
1569 unsigned int apic_spiv;
1570 unsigned int apic_lvtt;
1571 unsigned int apic_lvtpc;
1572 unsigned int apic_lvt0;
1573 unsigned int apic_lvt1;
1574 unsigned int apic_lvterr;
1575 unsigned int apic_tmict;
1576 unsigned int apic_tdcr;
1577 unsigned int apic_thmr;
1578 } apic_pm_state;
1579
1580 static int lapic_suspend(struct sys_device *dev, pm_message_t state)
1581 {
1582 unsigned long flags;
1583 int maxlvt;
1584
1585 if (!apic_pm_state.active)
1586 return 0;
1587
1588 maxlvt = lapic_get_maxlvt();
1589
1590 apic_pm_state.apic_id = apic_read(APIC_ID);
1591 apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
1592 apic_pm_state.apic_ldr = apic_read(APIC_LDR);
1593 apic_pm_state.apic_dfr = apic_read(APIC_DFR);
1594 apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
1595 apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
1596 if (maxlvt >= 4)
1597 apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
1598 apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
1599 apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
1600 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
1601 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
1602 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
1603 #ifdef CONFIG_X86_MCE_P4THERMAL
1604 if (maxlvt >= 5)
1605 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
1606 #endif
1607
1608 local_irq_save(flags);
1609 disable_local_APIC();
1610 local_irq_restore(flags);
1611 return 0;
1612 }
1613
1614 static int lapic_resume(struct sys_device *dev)
1615 {
1616 unsigned int l, h;
1617 unsigned long flags;
1618 int maxlvt;
1619
1620 if (!apic_pm_state.active)
1621 return 0;
1622
1623 maxlvt = lapic_get_maxlvt();
1624
1625 local_irq_save(flags);
1626
1627 /*
1628 * Make sure the APICBASE points to the right address
1629 *
1630 * FIXME! This will be wrong if we ever support suspend on
1631 * SMP! We'll need to do this as part of the CPU restore!
1632 */
1633 rdmsr(MSR_IA32_APICBASE, l, h);
1634 l &= ~MSR_IA32_APICBASE_BASE;
1635 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
1636 wrmsr(MSR_IA32_APICBASE, l, h);
1637
1638 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
1639 apic_write(APIC_ID, apic_pm_state.apic_id);
1640 apic_write(APIC_DFR, apic_pm_state.apic_dfr);
1641 apic_write(APIC_LDR, apic_pm_state.apic_ldr);
1642 apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
1643 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
1644 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
1645 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
1646 #ifdef CONFIG_X86_MCE_P4THERMAL
1647 if (maxlvt >= 5)
1648 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
1649 #endif
1650 if (maxlvt >= 4)
1651 apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
1652 apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
1653 apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
1654 apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
1655 apic_write(APIC_ESR, 0);
1656 apic_read(APIC_ESR);
1657 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
1658 apic_write(APIC_ESR, 0);
1659 apic_read(APIC_ESR);
1660 local_irq_restore(flags);
1661 return 0;
1662 }
1663
1664 /*
1665 * This device has no shutdown method - fully functioning local APICs
1666 * are needed on every CPU up until machine_halt/restart/poweroff.
1667 */
1668
1669 static struct sysdev_class lapic_sysclass = {
1670 .name = "lapic",
1671 .resume = lapic_resume,
1672 .suspend = lapic_suspend,
1673 };
1674
1675 static struct sys_device device_lapic = {
1676 .id = 0,
1677 .cls = &lapic_sysclass,
1678 };
1679
1680 static void __devinit apic_pm_activate(void)
1681 {
1682 apic_pm_state.active = 1;
1683 }
1684
1685 static int __init init_lapic_sysfs(void)
1686 {
1687 int error;
1688
1689 if (!cpu_has_apic)
1690 return 0;
1691 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
1692
1693 error = sysdev_class_register(&lapic_sysclass);
1694 if (!error)
1695 error = sysdev_register(&device_lapic);
1696 return error;
1697 }
1698 device_initcall(init_lapic_sysfs);
1699
1700 #else /* CONFIG_PM */
1701
1702 static void apic_pm_activate(void) { }
1703
1704 #endif /* CONFIG_PM */
1705
1706 /*
1707 * APIC command line parameters
1708 */
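/*
 * Options handled below:
 *   lapic              - force-enable the local APIC
 *   nolapic            - force-disable the local APIC
 *   nolapic_timer      - disable the local APIC timer
 *   lapic_timer_c2_ok  - the local APIC timer is trustworthy in C2
 *   apic=verbose|debug - raise the APIC log verbosity
 */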
1709 static int __init parse_lapic(char *arg)
1710 {
1711 enable_local_apic = 1;
1712 return 0;
1713 }
1714 early_param("lapic", parse_lapic);
1715
1716 static int __init parse_nolapic(char *arg)
1717 {
1718 enable_local_apic = -1;
1719 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1720 return 0;
1721 }
1722 early_param("nolapic", parse_nolapic);
1723
1724 static int __init parse_disable_lapic_timer(char *arg)
1725 {
1726 local_apic_timer_disabled = 1;
1727 return 0;
1728 }
1729 early_param("nolapic_timer", parse_disable_lapic_timer);
1730
1731 static int __init parse_lapic_timer_c2_ok(char *arg)
1732 {
1733 local_apic_timer_c2_ok = 1;
1734 return 0;
1735 }
1736 early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
1737
1738 static int __init apic_set_verbosity(char *str)
1739 {
1740 if (strcmp("debug", str) == 0)
1741 apic_verbosity = APIC_DEBUG;
1742 else if (strcmp("verbose", str) == 0)
1743 apic_verbosity = APIC_VERBOSE;
1744 return 1;
1745 }
1746 __setup("apic=", apic_set_verbosity);
1747