Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * x86 SMP booting functions | |
3 | * | |
4 | * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> | |
5 | * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> | |
6 | * | |
7 | * Much of the core SMP work is based on previous work by Thomas Radke, to | |
8 | * whom a great many thanks are extended. | |
9 | * | |
10 | * Thanks to Intel for making available several different Pentium, | |
11 | * Pentium Pro and Pentium-II/Xeon MP machines. | |
12 | * Original development of Linux SMP code supported by Caldera. | |
13 | * | |
14 | * This code is released under the GNU General Public License version 2 or | |
15 | * later. | |
16 | * | |
17 | * Fixes | |
18 | * Felix Koop : NR_CPUS used properly | |
19 | * Jose Renau : Handle single CPU case. | |
20 | * Alan Cox : By repeated request 8) - Total BogoMIPS report. | |
21 | * Greg Wright : Fix for kernel stacks panic. | |
22 | * Erich Boleyn : MP v1.4 and additional changes. | |
23 | * Matthias Sattler : Changes for 2.1 kernel map. | |
24 | * Michel Lespinasse : Changes for 2.1 kernel map. | |
25 | * Michael Chastain : Change trampoline.S to gnu as. | |
26 | * Alan Cox : Dumb bug: 'B' step PPro's are fine | |
27 | * Ingo Molnar : Added APIC timers, based on code | |
28 | * from Jose Renau | |
29 | * Ingo Molnar : various cleanups and rewrites | |
30 | * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug. | |
31 | * Maciej W. Rozycki : Bits for genuine 82489DX APICs | |
32 | * Martin J. Bligh : Added support for multi-quad systems | |
33 | * Dave Jones : Report invalid combinations of Athlon CPUs. | |
34 | * Rusty Russell : Hacked into shape for new "hotplug" boot process. */ | |
35 | ||
36 | #include <linux/module.h> | |
37 | #include <linux/config.h> | |
38 | #include <linux/init.h> | |
39 | #include <linux/kernel.h> | |
40 | ||
41 | #include <linux/mm.h> | |
42 | #include <linux/sched.h> | |
43 | #include <linux/kernel_stat.h> | |
44 | #include <linux/smp_lock.h> | |
1da177e4 | 45 | #include <linux/bootmem.h> |
f3705136 ZM |
46 | #include <linux/notifier.h> |
47 | #include <linux/cpu.h> | |
48 | #include <linux/percpu.h> | |
1da177e4 LT |
49 | |
50 | #include <linux/delay.h> | |
51 | #include <linux/mc146818rtc.h> | |
52 | #include <asm/tlbflush.h> | |
53 | #include <asm/desc.h> | |
54 | #include <asm/arch_hooks.h> | |
55 | ||
56 | #include <mach_apic.h> | |
57 | #include <mach_wakecpu.h> | |
58 | #include <smpboot_hooks.h> | |
59 | ||
60 | /* Set if we find a B stepping CPU */ | |
0bb3184d | 61 | static int __devinitdata smp_b_stepping; |
1da177e4 LT |
62 | |
63 | /* Number of siblings per CPU package */ | |
64 | int smp_num_siblings = 1; | |
129f6946 AD |
65 | #ifdef CONFIG_X86_HT |
66 | EXPORT_SYMBOL(smp_num_siblings); | |
67 | #endif | |
d720803a LS |
68 | |
69 | /* Package ID of each logical CPU */ | |
6c036527 | 70 | int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID}; |
1da177e4 | 71 | EXPORT_SYMBOL(phys_proc_id); |
d720803a LS |
72 | |
73 | /* Core ID of each logical CPU */ | |
6c036527 | 74 | int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID}; |
3dd9d514 | 75 | EXPORT_SYMBOL(cpu_core_id); |
1da177e4 | 76 | |
6c036527 | 77 | cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; |
d720803a LS |
78 | EXPORT_SYMBOL(cpu_sibling_map); |
79 | ||
6c036527 | 80 | cpumask_t cpu_core_map[NR_CPUS] __read_mostly; |
d720803a LS |
81 | EXPORT_SYMBOL(cpu_core_map); |
82 | ||
1da177e4 | 83 | /* bitmap of online cpus */ |
6c036527 | 84 | cpumask_t cpu_online_map __read_mostly; |
129f6946 | 85 | EXPORT_SYMBOL(cpu_online_map); |
1da177e4 LT |
86 | |
87 | cpumask_t cpu_callin_map; | |
88 | cpumask_t cpu_callout_map; | |
129f6946 | 89 | EXPORT_SYMBOL(cpu_callout_map); |
9f40a72a NP |
90 | #ifdef CONFIG_HOTPLUG_CPU |
91 | cpumask_t cpu_possible_map = CPU_MASK_ALL; | |
92 | #else | |
4ad8d383 | 93 | cpumask_t cpu_possible_map; |
9f40a72a | 94 | #endif |
4ad8d383 | 95 | EXPORT_SYMBOL(cpu_possible_map); |
1da177e4 LT |
96 | static cpumask_t smp_commenced_mask; |
97 | ||
e1367daf LS |
98 | /* TSC's upper 32 bits can't be written on earlier CPUs (before Prescott), so there |
99 | * is no way to resync an AP against the BP. TBD: for Prescott and above, we |
100 | * should use IA64's algorithm. |
101 | */ |
102 | static int __devinitdata tsc_sync_disabled; | |
103 | ||
1da177e4 LT |
104 | /* Per CPU bogomips and other parameters */ |
105 | struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; | |
129f6946 | 106 | EXPORT_SYMBOL(cpu_data); |
1da177e4 | 107 | |
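| /* APIC ID of each logical CPU; 0xff means no APIC ID has been assigned yet. */ |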
6c036527 | 108 | u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = |
1da177e4 LT |
109 | { [0 ... NR_CPUS-1] = 0xff }; |
110 | EXPORT_SYMBOL(x86_cpu_to_apicid); | |
111 | ||
112 | /* | |
113 | * Trampoline 80x86 program as an array. | |
114 | */ | |
115 | ||
116 | extern unsigned char trampoline_data []; | |
117 | extern unsigned char trampoline_end []; | |
118 | static unsigned char *trampoline_base; | |
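| /* Previous executability of the trampoline page, restored in smp_cpus_done(). */ |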
119 | static int trampoline_exec; | |
120 | ||
121 | static void map_cpu_to_logical_apicid(void); | |
122 | ||
f3705136 ZM |
123 | /* State of each CPU. */ |
124 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; | |
125 | ||
1da177e4 LT |
126 | /* |
127 | * Currently trivial. Write the real->protected mode | |
128 | * bootstrap into the page concerned. The caller | |
129 | * has made sure it's suitably aligned. | |
130 | */ | |
131 | ||
0bb3184d | 132 | static unsigned long __devinit setup_trampoline(void) |
1da177e4 LT |
133 | { |
134 | memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data); | |
135 | return virt_to_phys(trampoline_base); | |
136 | } | |
137 | ||
138 | /* | |
139 | * We are called very early to get the low memory for the | |
140 | * SMP bootup trampoline page. | |
141 | */ | |
142 | void __init smp_alloc_memory(void) | |
143 | { | |
144 | trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE); | |
145 | /* | |
146 | * Has to be in very low memory so we can execute | |
147 | * real-mode AP code. | |
148 | */ | |
149 | if (__pa(trampoline_base) >= 0x9F000) | |
150 | BUG(); | |
151 | /* | |
152 | * Make the SMP trampoline executable: | |
153 | */ | |
154 | trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1); | |
155 | } | |
156 | ||
157 | /* | |
158 | * The bootstrap kernel entry code has set these up. Save them for | |
159 | * a given CPU | |
160 | */ | |
161 | ||
0bb3184d | 162 | static void __devinit smp_store_cpu_info(int id) |
1da177e4 LT |
163 | { |
164 | struct cpuinfo_x86 *c = cpu_data + id; | |
165 | ||
166 | *c = boot_cpu_data; | |
167 | if (id!=0) | |
168 | identify_cpu(c); | |
169 | /* | |
170 | * Mask B, Pentium, but not Pentium MMX | |
171 | */ | |
172 | if (c->x86_vendor == X86_VENDOR_INTEL && | |
173 | c->x86 == 5 && | |
174 | c->x86_mask >= 1 && c->x86_mask <= 4 && | |
175 | c->x86_model <= 3) | |
176 | /* | |
177 | * Remember we have B step Pentia with bugs | |
178 | */ | |
179 | smp_b_stepping = 1; | |
180 | ||
181 | /* | |
182 | * Certain Athlons might work (for various values of 'work') in SMP | |
183 | * but they are not certified as MP capable. | |
184 | */ | |
185 | if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) { | |
186 | ||
187 | /* Athlon 660/661 is valid. */ | |
188 | if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1))) | |
189 | goto valid_k7; | |
190 | ||
191 | /* Duron 670 is valid */ | |
192 | if ((c->x86_model==7) && (c->x86_mask==0)) | |
193 | goto valid_k7; | |
194 | ||
195 | /* | |
196 | * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability bit. |
197 | * It's worth noting that the A5 stepping (662) of some Athlon XPs |
198 | * has the MP bit set. |
199 | * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more. | |
200 | */ | |
201 | if (((c->x86_model==6) && (c->x86_mask>=2)) || | |
202 | ((c->x86_model==7) && (c->x86_mask>=1)) || | |
203 | (c->x86_model> 7)) | |
204 | if (cpu_has_mp) | |
205 | goto valid_k7; | |
206 | ||
207 | /* If we get here, it's not a certified SMP capable AMD system. */ | |
9f158333 | 208 | add_taint(TAINT_UNSAFE_SMP); |
1da177e4 LT |
209 | } |
210 | ||
211 | valid_k7: | |
212 | ; | |
213 | } | |
214 | ||
215 | /* | |
216 | * TSC synchronization. | |
217 | * | |
218 | * We first check whether all CPUs have their TSCs synchronized, |
219 | * then we print a warning if not, and always resync. | |
220 | */ | |
221 | ||
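| /* Rendezvous state for the TSC sync below: the BP raises tsc_start_flag, |
| * BP and APs count each other in and out of every loop iteration via |
| * tsc_count_start/tsc_count_stop, and tsc_values[] holds the per-CPU TSC |
| * samples used for the skew check. */ |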
222 | static atomic_t tsc_start_flag = ATOMIC_INIT(0); | |
223 | static atomic_t tsc_count_start = ATOMIC_INIT(0); | |
224 | static atomic_t tsc_count_stop = ATOMIC_INIT(0); | |
225 | static unsigned long long tsc_values[NR_CPUS]; | |
226 | ||
227 | #define NR_LOOPS 5 | |
228 | ||
229 | static void __init synchronize_tsc_bp (void) | |
230 | { | |
231 | int i; | |
232 | unsigned long long t0; | |
233 | unsigned long long sum, avg; | |
234 | long long delta; | |
a3a255e7 | 235 | unsigned int one_usec; |
1da177e4 LT |
236 | int buggy = 0; |
237 | ||
238 | printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus()); | |
239 | ||
240 | /* convert from kcyc/sec to cyc/usec */ | |
241 | one_usec = cpu_khz / 1000; | |
242 | ||
243 | atomic_set(&tsc_start_flag, 1); | |
244 | wmb(); | |
245 | ||
246 | /* | |
247 | * We loop a few times to get a primed instruction cache, | |
248 | * then the last pass is more or less synchronized and | |
249 | * the BP and APs set their cycle counters to zero all at | |
250 | * once. This reduces the chance of having random offsets | |
251 | * between the processors, and guarantees that the maximum | |
252 | * delay between the cycle counters is never bigger than | |
253 | * the latency of information-passing (cachelines) between | |
254 | * two CPUs. | |
255 | */ | |
256 | for (i = 0; i < NR_LOOPS; i++) { | |
257 | /* | |
258 | * all APs synchronize but they loop on '== num_cpus' | |
259 | */ | |
260 | while (atomic_read(&tsc_count_start) != num_booting_cpus()-1) | |
261 | mb(); | |
262 | atomic_set(&tsc_count_stop, 0); | |
263 | wmb(); | |
264 | /* | |
265 | * this lets the APs save their current TSC: | |
266 | */ | |
267 | atomic_inc(&tsc_count_start); | |
268 | ||
269 | rdtscll(tsc_values[smp_processor_id()]); | |
270 | /* | |
271 | * We clear the TSC in the last loop: | |
272 | */ | |
273 | if (i == NR_LOOPS-1) | |
274 | write_tsc(0, 0); | |
275 | ||
276 | /* | |
277 | * Wait for all APs to leave the synchronization point: | |
278 | */ | |
279 | while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1) | |
280 | mb(); | |
281 | atomic_set(&tsc_count_start, 0); | |
282 | wmb(); | |
283 | atomic_inc(&tsc_count_stop); | |
284 | } | |
285 | ||
286 | sum = 0; | |
287 | for (i = 0; i < NR_CPUS; i++) { | |
288 | if (cpu_isset(i, cpu_callout_map)) { | |
289 | t0 = tsc_values[i]; | |
290 | sum += t0; | |
291 | } | |
292 | } | |
293 | avg = sum; | |
294 | do_div(avg, num_booting_cpus()); | |
295 | ||
296 | sum = 0; | |
297 | for (i = 0; i < NR_CPUS; i++) { | |
298 | if (!cpu_isset(i, cpu_callout_map)) | |
299 | continue; | |
300 | delta = tsc_values[i] - avg; | |
301 | if (delta < 0) | |
302 | delta = -delta; | |
303 | /* | |
304 | * We report clock differences bigger than 2 microseconds. |
305 | */ | |
306 | if (delta > 2*one_usec) { | |
307 | long realdelta; | |
308 | if (!buggy) { | |
309 | buggy = 1; | |
310 | printk("\n"); | |
311 | } | |
312 | realdelta = delta; | |
313 | do_div(realdelta, one_usec); | |
314 | if (tsc_values[i] < avg) | |
315 | realdelta = -realdelta; | |
316 | ||
317 | printk(KERN_INFO "CPU#%d had %ld usecs TSC skew, fixed it up.\n", i, realdelta); | |
318 | } | |
319 | ||
320 | sum += delta; | |
321 | } | |
322 | if (!buggy) | |
323 | printk("passed.\n"); | |
324 | } | |
325 | ||
326 | static void __init synchronize_tsc_ap (void) | |
327 | { | |
328 | int i; | |
329 | ||
330 | /* | |
331 | * Not every cpu is online at the time | |
332 | * this gets called, so we first wait for the BP to | |
333 | * finish SMP initialization: | |
334 | */ | |
335 | while (!atomic_read(&tsc_start_flag)) mb(); | |
336 | ||
337 | for (i = 0; i < NR_LOOPS; i++) { | |
338 | atomic_inc(&tsc_count_start); | |
339 | while (atomic_read(&tsc_count_start) != num_booting_cpus()) | |
340 | mb(); | |
341 | ||
342 | rdtscll(tsc_values[smp_processor_id()]); | |
343 | if (i == NR_LOOPS-1) | |
344 | write_tsc(0, 0); | |
345 | ||
346 | atomic_inc(&tsc_count_stop); | |
347 | while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb(); | |
348 | } | |
349 | } | |
350 | #undef NR_LOOPS | |
351 | ||
352 | extern void calibrate_delay(void); | |
353 | ||
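| /* Set once the INIT-deassert IPI has gone out; APs may wait on this in |
| * smp_callin() before touching their local APIC. */ |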
354 | static atomic_t init_deasserted; | |
355 | ||
0bb3184d | 356 | static void __devinit smp_callin(void) |
1da177e4 LT |
357 | { |
358 | int cpuid, phys_id; | |
359 | unsigned long timeout; | |
360 | ||
361 | /* | |
362 | * If woken up by an INIT in an 82489DX configuration, |
363 | * we may get here before an INIT-deassert IPI reaches | |
364 | * our local APIC. We have to wait for the IPI or we'll | |
365 | * lock up on an APIC access. | |
366 | */ | |
367 | wait_for_init_deassert(&init_deasserted); | |
368 | ||
369 | /* | |
370 | * (This works even if the APIC is not enabled.) | |
371 | */ | |
372 | phys_id = GET_APIC_ID(apic_read(APIC_ID)); | |
373 | cpuid = smp_processor_id(); | |
374 | if (cpu_isset(cpuid, cpu_callin_map)) { | |
375 | printk("huh, phys CPU#%d, CPU#%d already present??\n", | |
376 | phys_id, cpuid); | |
377 | BUG(); | |
378 | } | |
379 | Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id); | |
380 | ||
381 | /* | |
382 | * STARTUP IPIs are fragile beasts as they might sometimes | |
383 | * trigger some glue motherboard logic. Keep the APIC bus |
384 | * completely silent for 1 second; this overestimates the time the |
385 | * boot CPU spends sending the up to 2 STARTUP IPIs |
386 | * by a factor of two. This should be enough. |
387 | */ | |
388 | ||
389 | /* | |
390 | * Waiting 2s total for startup (udelay is not yet working) | |
391 | */ | |
392 | timeout = jiffies + 2*HZ; | |
393 | while (time_before(jiffies, timeout)) { | |
394 | /* | |
395 | * Has the boot CPU finished its STARTUP sequence? |
396 | */ | |
397 | if (cpu_isset(cpuid, cpu_callout_map)) | |
398 | break; | |
399 | rep_nop(); | |
400 | } | |
401 | ||
402 | if (!time_before(jiffies, timeout)) { | |
403 | printk("BUG: CPU%d started up but did not get a callout!\n", | |
404 | cpuid); | |
405 | BUG(); | |
406 | } | |
407 | ||
408 | /* | |
409 | * the boot CPU has finished the init stage and is spinning | |
410 | * on callin_map until we finish. We are free to set up this | |
411 | * CPU, first the APIC. (this is probably redundant on most | |
412 | * boards) | |
413 | */ | |
414 | ||
415 | Dprintk("CALLIN, before setup_local_APIC().\n"); | |
416 | smp_callin_clear_local_apic(); | |
417 | setup_local_APIC(); | |
418 | map_cpu_to_logical_apicid(); | |
419 | ||
420 | /* | |
421 | * Get our bogomips. | |
422 | */ | |
423 | calibrate_delay(); | |
424 | Dprintk("Stack at about %p\n",&cpuid); | |
425 | ||
426 | /* | |
427 | * Save our processor parameters | |
428 | */ | |
429 | smp_store_cpu_info(cpuid); | |
430 | ||
431 | disable_APIC_timer(); | |
432 | ||
433 | /* | |
434 | * Allow the master to continue. | |
435 | */ | |
436 | cpu_set(cpuid, cpu_callin_map); | |
437 | ||
438 | /* | |
439 | * Synchronize the TSC with the BP | |
440 | */ | |
e1367daf | 441 | if (cpu_has_tsc && cpu_khz && !tsc_sync_disabled) |
1da177e4 LT |
442 | synchronize_tsc_ap(); |
443 | } | |
444 | ||
445 | static int cpucount; | |
446 | ||
d720803a LS |
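| /* Build cpu_sibling_map[] (logical CPUs reporting the same core ID, i.e. |
| * HT siblings) and cpu_core_map[] (CPUs in the same physical package) for |
| * this cpu, based on the callout map. */ |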
447 | static inline void |
448 | set_cpu_sibling_map(int cpu) | |
449 | { | |
450 | int i; | |
451 | ||
452 | if (smp_num_siblings > 1) { | |
453 | for (i = 0; i < NR_CPUS; i++) { | |
454 | if (!cpu_isset(i, cpu_callout_map)) | |
455 | continue; | |
456 | if (cpu_core_id[cpu] == cpu_core_id[i]) { | |
457 | cpu_set(i, cpu_sibling_map[cpu]); | |
458 | cpu_set(cpu, cpu_sibling_map[i]); | |
459 | } | |
460 | } | |
461 | } else { | |
462 | cpu_set(cpu, cpu_sibling_map[cpu]); | |
463 | } | |
464 | ||
465 | if (current_cpu_data.x86_num_cores > 1) { | |
466 | for (i = 0; i < NR_CPUS; i++) { | |
467 | if (!cpu_isset(i, cpu_callout_map)) | |
468 | continue; | |
469 | if (phys_proc_id[cpu] == phys_proc_id[i]) { | |
470 | cpu_set(i, cpu_core_map[cpu]); | |
471 | cpu_set(cpu, cpu_core_map[i]); | |
472 | } | |
473 | } | |
474 | } else { | |
475 | cpu_core_map[cpu] = cpu_sibling_map[cpu]; | |
476 | } | |
477 | } | |
478 | ||
1da177e4 LT |
479 | /* |
480 | * Activate a secondary processor. | |
481 | */ | |
0bb3184d | 482 | static void __devinit start_secondary(void *unused) |
1da177e4 LT |
483 | { |
484 | /* | |
485 | * Don't put anything before smp_callin(); SMP |
486 | * booting is so fragile that we want to limit the |
487 | * things done here to the bare minimum. |
488 | */ | |
489 | cpu_init(); | |
490 | smp_callin(); | |
491 | while (!cpu_isset(smp_processor_id(), smp_commenced_mask)) | |
492 | rep_nop(); | |
493 | setup_secondary_APIC_clock(); | |
494 | if (nmi_watchdog == NMI_IO_APIC) { | |
495 | disable_8259A_irq(0); | |
496 | enable_NMI_through_LVT0(NULL); | |
497 | enable_8259A_irq(0); | |
498 | } | |
499 | enable_APIC_timer(); | |
500 | /* | |
501 | * low-memory mappings have been cleared, flush them from | |
502 | * the local TLBs too. | |
503 | */ | |
504 | local_flush_tlb(); | |
6fe940d6 | 505 | |
d720803a LS |
506 | /* This must be done before setting cpu_online_map */ |
507 | set_cpu_sibling_map(raw_smp_processor_id()); | |
508 | wmb(); | |
509 | ||
6fe940d6 LS |
510 | /* |
511 | * We need to hold call_lock, so there is no inconsistency |
512 | * between the time smp_call_function() determines the number of |
513 | * IPI recipients and the time when the determination is made |
514 | * as to which cpus receive the IPI. Holding this |
515 | * lock keeps this cpu out of any smp_call_function() that is |
516 | * currently in progress. |
517 | */ | |
518 | lock_ipi_call_lock(); | |
1da177e4 | 519 | cpu_set(smp_processor_id(), cpu_online_map); |
6fe940d6 | 520 | unlock_ipi_call_lock(); |
e1367daf | 521 | per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; |
1da177e4 LT |
522 | |
523 | /* We can take interrupts now: we're officially "up". */ | |
524 | local_irq_enable(); | |
525 | ||
526 | wmb(); | |
527 | cpu_idle(); | |
528 | } | |
529 | ||
530 | /* | |
531 | * Everything has been set up for the secondary | |
532 | * CPUs - they just need to reload everything | |
533 | * from the task structure. |
534 | * This function must not return. | |
535 | */ | |
0bb3184d | 536 | void __devinit initialize_secondary(void) |
1da177e4 LT |
537 | { |
538 | /* | |
539 | * We don't actually need to load the full TSS, | |
540 | * basically just the stack pointer and the eip. | |
541 | */ | |
542 | ||
543 | asm volatile( | |
544 | "movl %0,%%esp\n\t" | |
545 | "jmp *%1" | |
546 | : | |
547 | :"r" (current->thread.esp),"r" (current->thread.eip)); | |
548 | } | |
549 | ||
550 | extern struct { | |
551 | void * esp; | |
552 | unsigned short ss; | |
553 | } stack_start; | |
554 | ||
555 | #ifdef CONFIG_NUMA | |
556 | ||
557 | /* which logical CPUs are on which nodes */ | |
6c036527 | 558 | cpumask_t node_2_cpu_mask[MAX_NUMNODES] __read_mostly = |
1da177e4 LT |
559 | { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE }; |
560 | /* which node each logical CPU is on */ | |
6c036527 | 561 | int cpu_2_node[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 }; |
1da177e4 LT |
562 | EXPORT_SYMBOL(cpu_2_node); |
563 | ||
564 | /* set up a mapping between cpu and node. */ | |
565 | static inline void map_cpu_to_node(int cpu, int node) | |
566 | { | |
567 | printk("Mapping cpu %d to node %d\n", cpu, node); | |
568 | cpu_set(cpu, node_2_cpu_mask[node]); | |
569 | cpu_2_node[cpu] = node; | |
570 | } | |
571 | ||
572 | /* undo a mapping between cpu and node. */ | |
573 | static inline void unmap_cpu_to_node(int cpu) | |
574 | { | |
575 | int node; | |
576 | ||
577 | printk("Unmapping cpu %d from all nodes\n", cpu); | |
578 | for (node = 0; node < MAX_NUMNODES; node ++) | |
579 | cpu_clear(cpu, node_2_cpu_mask[node]); | |
580 | cpu_2_node[cpu] = 0; | |
581 | } | |
582 | #else /* !CONFIG_NUMA */ | |
583 | ||
584 | #define map_cpu_to_node(cpu, node) ({}) | |
585 | #define unmap_cpu_to_node(cpu) ({}) | |
586 | ||
587 | #endif /* CONFIG_NUMA */ | |
588 | ||
6c036527 | 589 | u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID }; |
1da177e4 LT |
590 | |
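| /* Record this CPU's logical APIC ID and map the CPU onto its NUMA node. */ |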
591 | static void map_cpu_to_logical_apicid(void) | |
592 | { | |
593 | int cpu = smp_processor_id(); | |
594 | int apicid = logical_smp_processor_id(); | |
595 | ||
596 | cpu_2_logical_apicid[cpu] = apicid; | |
597 | map_cpu_to_node(cpu, apicid_to_node(apicid)); | |
598 | } | |
599 | ||
600 | static void unmap_cpu_to_logical_apicid(int cpu) | |
601 | { | |
602 | cpu_2_logical_apicid[cpu] = BAD_APICID; | |
603 | unmap_cpu_to_node(cpu); | |
604 | } | |
605 | ||
606 | #if APIC_DEBUG | |
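| /* Debug helper: remotely read a target APIC's ID, version and SPIV |
| * registers via REMRD IPIs and print the results. */ |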
607 | static inline void __inquire_remote_apic(int apicid) | |
608 | { | |
609 | int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; | |
610 | char *names[] = { "ID", "VERSION", "SPIV" }; | |
611 | int timeout, status; | |
612 | ||
613 | printk("Inquiring remote APIC #%d...\n", apicid); | |
614 | ||
615 | for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) { | |
616 | printk("... APIC #%d %s: ", apicid, names[i]); | |
617 | ||
618 | /* | |
619 | * Wait for idle. | |
620 | */ | |
621 | apic_wait_icr_idle(); | |
622 | ||
623 | apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid)); | |
624 | apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]); | |
625 | ||
626 | timeout = 0; | |
627 | do { | |
628 | udelay(100); | |
629 | status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK; | |
630 | } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000); | |
631 | ||
632 | switch (status) { | |
633 | case APIC_ICR_RR_VALID: | |
634 | status = apic_read(APIC_RRR); | |
635 | printk("%08x\n", status); | |
636 | break; | |
637 | default: | |
638 | printk("failed\n"); | |
639 | } | |
640 | } | |
641 | } | |
642 | #endif | |
643 | ||
644 | #ifdef WAKE_SECONDARY_VIA_NMI | |
645 | /* | |
646 | * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal | |
647 | * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this | |
648 | * won't ... remember to clear down the APIC, etc later. | |
649 | */ | |
0bb3184d | 650 | static int __devinit |
1da177e4 LT |
651 | wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip) |
652 | { | |
653 | unsigned long send_status = 0, accept_status = 0; | |
654 | int timeout, maxlvt; | |
655 | ||
656 | /* Target chip */ | |
657 | apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid)); | |
658 | ||
659 | /* Boot on the stack */ | |
660 | /* Kick the second */ | |
661 | apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL); | |
662 | ||
663 | Dprintk("Waiting for send to finish...\n"); | |
664 | timeout = 0; | |
665 | do { | |
666 | Dprintk("+"); | |
667 | udelay(100); | |
668 | send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY; | |
669 | } while (send_status && (timeout++ < 1000)); | |
670 | ||
671 | /* | |
672 | * Give the other CPU some time to accept the IPI. | |
673 | */ | |
674 | udelay(200); | |
675 | /* | |
676 | * Due to the Pentium erratum 3AP. | |
677 | */ | |
678 | maxlvt = get_maxlvt(); | |
679 | if (maxlvt > 3) { | |
680 | apic_read_around(APIC_SPIV); | |
681 | apic_write(APIC_ESR, 0); | |
682 | } | |
683 | accept_status = (apic_read(APIC_ESR) & 0xEF); | |
684 | Dprintk("NMI sent.\n"); | |
685 | ||
686 | if (send_status) | |
687 | printk("APIC never delivered???\n"); | |
688 | if (accept_status) | |
689 | printk("APIC delivery error (%lx).\n", accept_status); | |
690 | ||
691 | return (send_status | accept_status); | |
692 | } | |
693 | #endif /* WAKE_SECONDARY_VIA_NMI */ | |
694 | ||
695 | #ifdef WAKE_SECONDARY_VIA_INIT | |
0bb3184d | 696 | static int __devinit |
1da177e4 LT |
697 | wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) |
698 | { | |
699 | unsigned long send_status = 0, accept_status = 0; | |
700 | int maxlvt, timeout, num_starts, j; | |
701 | ||
702 | /* | |
703 | * Be paranoid about clearing APIC errors. | |
704 | */ | |
705 | if (APIC_INTEGRATED(apic_version[phys_apicid])) { | |
706 | apic_read_around(APIC_SPIV); | |
707 | apic_write(APIC_ESR, 0); | |
708 | apic_read(APIC_ESR); | |
709 | } | |
710 | ||
711 | Dprintk("Asserting INIT.\n"); | |
712 | ||
713 | /* | |
714 | * Turn INIT on target chip | |
715 | */ | |
716 | apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); | |
717 | ||
718 | /* | |
719 | * Send IPI | |
720 | */ | |
721 | apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT | |
722 | | APIC_DM_INIT); | |
723 | ||
724 | Dprintk("Waiting for send to finish...\n"); | |
725 | timeout = 0; | |
726 | do { | |
727 | Dprintk("+"); | |
728 | udelay(100); | |
729 | send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY; | |
730 | } while (send_status && (timeout++ < 1000)); | |
731 | ||
732 | mdelay(10); | |
733 | ||
734 | Dprintk("Deasserting INIT.\n"); | |
735 | ||
736 | /* Target chip */ | |
737 | apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); | |
738 | ||
739 | /* Send IPI */ | |
740 | apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT); | |
741 | ||
742 | Dprintk("Waiting for send to finish...\n"); | |
743 | timeout = 0; | |
744 | do { | |
745 | Dprintk("+"); | |
746 | udelay(100); | |
747 | send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY; | |
748 | } while (send_status && (timeout++ < 1000)); | |
749 | ||
750 | atomic_set(&init_deasserted, 1); | |
751 | ||
752 | /* | |
753 | * Should we send STARTUP IPIs ? | |
754 | * | |
755 | * Determine this based on the APIC version. | |
756 | * If we don't have an integrated APIC, don't send the STARTUP IPIs. | |
757 | */ | |
758 | if (APIC_INTEGRATED(apic_version[phys_apicid])) | |
759 | num_starts = 2; | |
760 | else | |
761 | num_starts = 0; | |
762 | ||
763 | /* | |
764 | * Run STARTUP IPI loop. | |
765 | */ | |
766 | Dprintk("#startup loops: %d.\n", num_starts); | |
767 | ||
768 | maxlvt = get_maxlvt(); | |
769 | ||
770 | for (j = 1; j <= num_starts; j++) { | |
771 | Dprintk("Sending STARTUP #%d.\n",j); | |
772 | apic_read_around(APIC_SPIV); | |
773 | apic_write(APIC_ESR, 0); | |
774 | apic_read(APIC_ESR); | |
775 | Dprintk("After apic_write.\n"); | |
776 | ||
777 | /* | |
778 | * STARTUP IPI | |
779 | */ | |
780 | ||
781 | /* Target chip */ | |
782 | apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); | |
783 | ||
784 | /* Boot on the stack */ | |
785 | /* Kick the second */ | |
786 | apic_write_around(APIC_ICR, APIC_DM_STARTUP | |
787 | | (start_eip >> 12)); | |
788 | ||
789 | /* | |
790 | * Give the other CPU some time to accept the IPI. | |
791 | */ | |
792 | udelay(300); | |
793 | ||
794 | Dprintk("Startup point 1.\n"); | |
795 | ||
796 | Dprintk("Waiting for send to finish...\n"); | |
797 | timeout = 0; | |
798 | do { | |
799 | Dprintk("+"); | |
800 | udelay(100); | |
801 | send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY; | |
802 | } while (send_status && (timeout++ < 1000)); | |
803 | ||
804 | /* | |
805 | * Give the other CPU some time to accept the IPI. | |
806 | */ | |
807 | udelay(200); | |
808 | /* | |
809 | * Due to the Pentium erratum 3AP. | |
810 | */ | |
811 | if (maxlvt > 3) { | |
812 | apic_read_around(APIC_SPIV); | |
813 | apic_write(APIC_ESR, 0); | |
814 | } | |
815 | accept_status = (apic_read(APIC_ESR) & 0xEF); | |
816 | if (send_status || accept_status) | |
817 | break; | |
818 | } | |
819 | Dprintk("After Startup.\n"); | |
820 | ||
821 | if (send_status) | |
822 | printk("APIC never delivered???\n"); | |
823 | if (accept_status) | |
824 | printk("APIC delivery error (%lx).\n", accept_status); | |
825 | ||
826 | return (send_status | accept_status); | |
827 | } | |
828 | #endif /* WAKE_SECONDARY_VIA_INIT */ | |
829 | ||
830 | extern cpumask_t cpu_initialized; | |
e1367daf LS |
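| /* Return the lowest CPU number not yet in cpu_present_map, or -ENODEV if |
| * every CPU number is already in use. */ |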
831 | static inline int alloc_cpu_id(void) |
832 | { | |
833 | cpumask_t tmp_map; | |
834 | int cpu; | |
835 | cpus_complement(tmp_map, cpu_present_map); | |
836 | cpu = first_cpu(tmp_map); | |
837 | if (cpu >= NR_CPUS) | |
838 | return -ENODEV; | |
839 | return cpu; | |
840 | } | |
841 | ||
842 | #ifdef CONFIG_HOTPLUG_CPU | |
843 | static struct task_struct * __devinitdata cpu_idle_tasks[NR_CPUS]; | |
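| /* Reuse the cached idle task of a CPU that has been offlined before; |
| * otherwise fork a fresh one and remember it for future hot-plugs. */ |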
844 | static inline struct task_struct * alloc_idle_task(int cpu) | |
845 | { | |
846 | struct task_struct *idle; | |
847 | ||
848 | if ((idle = cpu_idle_tasks[cpu]) != NULL) { | |
849 | /* Initialize thread_struct. We really want to avoid destroying |
850 | * the idle thread. |
851 | */ | |
852 | idle->thread.esp = (unsigned long)(((struct pt_regs *) | |
853 | (THREAD_SIZE + (unsigned long) idle->thread_info)) - 1); | |
854 | init_idle(idle, cpu); | |
855 | return idle; | |
856 | } | |
857 | idle = fork_idle(cpu); | |
858 | ||
859 | if (!IS_ERR(idle)) | |
860 | cpu_idle_tasks[cpu] = idle; | |
861 | return idle; | |
862 | } | |
863 | #else | |
864 | #define alloc_idle_task(cpu) fork_idle(cpu) | |
865 | #endif | |
1da177e4 | 866 | |
e1367daf | 867 | static int __devinit do_boot_cpu(int apicid, int cpu) |
1da177e4 LT |
868 | /* |
869 | * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad | |
870 | * (ie clustered apic addressing mode), this is a LOGICAL apic ID. | |
871 | * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu. | |
872 | */ | |
873 | { | |
874 | struct task_struct *idle; | |
875 | unsigned long boot_error; | |
e1367daf | 876 | int timeout; |
1da177e4 LT |
877 | unsigned long start_eip; |
878 | unsigned short nmi_high = 0, nmi_low = 0; | |
879 | ||
e1367daf LS |
880 | ++cpucount; |
881 | ||
1da177e4 LT |
882 | /* |
883 | * We can't use kernel_thread since we must avoid |
884 | * rescheduling the child. |
885 | */ | |
e1367daf | 886 | idle = alloc_idle_task(cpu); |
1da177e4 LT |
887 | if (IS_ERR(idle)) |
888 | panic("failed fork for CPU %d", cpu); | |
889 | idle->thread.eip = (unsigned long) start_secondary; | |
890 | /* start_eip had better be page-aligned! */ | |
891 | start_eip = setup_trampoline(); | |
892 | ||
893 | /* So we see what's up */ | |
894 | printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip); | |
895 | /* Stack for startup_32 can be just as for start_secondary onwards */ | |
896 | stack_start.esp = (void *) idle->thread.esp; | |
897 | ||
898 | irq_ctx_init(cpu); | |
899 | ||
900 | /* | |
901 | * This grunge runs the startup process for | |
902 | * the targeted processor. | |
903 | */ | |
904 | ||
905 | atomic_set(&init_deasserted, 0); | |
906 | ||
907 | Dprintk("Setting warm reset code and vector.\n"); | |
908 | ||
909 | store_NMI_vector(&nmi_high, &nmi_low); | |
910 | ||
911 | smpboot_setup_warm_reset_vector(start_eip); | |
912 | ||
913 | /* | |
914 | * Starting actual IPI sequence... | |
915 | */ | |
916 | boot_error = wakeup_secondary_cpu(apicid, start_eip); | |
917 | ||
918 | if (!boot_error) { | |
919 | /* | |
920 | * allow APs to start initializing. | |
921 | */ | |
922 | Dprintk("Before Callout %d.\n", cpu); | |
923 | cpu_set(cpu, cpu_callout_map); | |
924 | Dprintk("After Callout %d.\n", cpu); | |
925 | ||
926 | /* | |
927 | * Wait 5s total for a response | |
928 | */ | |
929 | for (timeout = 0; timeout < 50000; timeout++) { | |
930 | if (cpu_isset(cpu, cpu_callin_map)) | |
931 | break; /* It has booted */ | |
932 | udelay(100); | |
933 | } | |
934 | ||
935 | if (cpu_isset(cpu, cpu_callin_map)) { | |
936 | /* number CPUs logically, starting from 1 (BSP is 0) */ | |
937 | Dprintk("OK.\n"); | |
938 | printk("CPU%d: ", cpu); | |
939 | print_cpu_info(&cpu_data[cpu]); | |
940 | Dprintk("CPU has booted.\n"); | |
941 | } else { | |
942 | boot_error= 1; | |
943 | if (*((volatile unsigned char *)trampoline_base) | |
944 | == 0xA5) | |
945 | /* trampoline started but...? */ | |
946 | printk("Stuck ??\n"); | |
947 | else | |
948 | /* trampoline code not run */ | |
949 | printk("Not responding.\n"); | |
950 | inquire_remote_apic(apicid); | |
951 | } | |
952 | } | |
e1367daf | 953 | |
1da177e4 LT |
954 | if (boot_error) { |
955 | /* Try to put things back the way they were before ... */ | |
956 | unmap_cpu_to_logical_apicid(cpu); | |
957 | cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */ | |
958 | cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ | |
959 | cpucount--; | |
e1367daf LS |
960 | } else { |
961 | x86_cpu_to_apicid[cpu] = apicid; | |
962 | cpu_set(cpu, cpu_present_map); | |
1da177e4 LT |
963 | } |
964 | ||
965 | /* mark "stuck" area as not stuck */ | |
966 | *((volatile unsigned long *)trampoline_base) = 0; | |
967 | ||
968 | return boot_error; | |
969 | } | |
970 | ||
e1367daf LS |
971 | #ifdef CONFIG_HOTPLUG_CPU |
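| /* Called on the dying CPU: tear down its per-cpu state and clear it from |
| * the bookkeeping masks so it can be brought back online later. */ |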
972 | void cpu_exit_clear(void) | |
973 | { | |
974 | int cpu = raw_smp_processor_id(); | |
975 | ||
976 | idle_task_exit(); | |
977 | ||
978 | cpucount --; | |
979 | cpu_uninit(); | |
980 | irq_ctx_exit(cpu); | |
981 | ||
982 | cpu_clear(cpu, cpu_callout_map); | |
983 | cpu_clear(cpu, cpu_callin_map); | |
984 | cpu_clear(cpu, cpu_present_map); | |
985 | ||
986 | cpu_clear(cpu, smp_commenced_mask); | |
987 | unmap_cpu_to_logical_apicid(cpu); | |
988 | } | |
989 | ||
990 | struct warm_boot_cpu_info { | |
991 | struct completion *complete; | |
992 | int apicid; | |
993 | int cpu; | |
994 | }; | |
995 | ||
996 | static void __devinit do_warm_boot_cpu(void *p) | |
997 | { | |
998 | struct warm_boot_cpu_info *info = p; | |
999 | do_boot_cpu(info->apicid, info->cpu); | |
1000 | complete(info->complete); | |
1001 | } | |
1002 | ||
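| /* Warm-boot a previously offlined CPU: run do_boot_cpu() from a workqueue |
| * (i.e. on an already-online CPU) and wait for it with a completion, |
| * re-creating the low virtual mappings the trampoline needs and zapping |
| * them again afterwards. */ |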
1003 | int __devinit smp_prepare_cpu(int cpu) | |
1004 | { | |
1005 | DECLARE_COMPLETION(done); | |
1006 | struct warm_boot_cpu_info info; | |
1007 | struct work_struct task; | |
1008 | int apicid, ret; | |
1009 | ||
1010 | lock_cpu_hotplug(); | |
1011 | apicid = x86_cpu_to_apicid[cpu]; | |
1012 | if (apicid == BAD_APICID) { | |
1013 | ret = -ENODEV; | |
1014 | goto exit; | |
1015 | } | |
1016 | ||
1017 | info.complete = &done; | |
1018 | info.apicid = apicid; | |
1019 | info.cpu = cpu; | |
1020 | INIT_WORK(&task, do_warm_boot_cpu, &info); | |
1021 | ||
1022 | tsc_sync_disabled = 1; | |
1023 | ||
1024 | /* init low mem mapping */ | |
d7271b14 ZA |
1025 | clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, |
1026 | KERNEL_PGD_PTRS); | |
e1367daf LS |
1027 | flush_tlb_all(); |
1028 | schedule_work(&task); | |
1029 | wait_for_completion(&done); | |
1030 | ||
1031 | tsc_sync_disabled = 0; | |
1032 | zap_low_mappings(); | |
1033 | ret = 0; | |
1034 | exit: | |
1035 | unlock_cpu_hotplug(); | |
1036 | return ret; | |
1037 | } | |
1038 | #endif | |
1039 | ||
1da177e4 LT |
1040 | static void smp_tune_scheduling (void) |
1041 | { | |
1042 | unsigned long cachesize; /* kB */ | |
1043 | unsigned long bandwidth = 350; /* MB/s */ | |
1044 | /* | |
1045 | * Rough estimation for SMP scheduling: this is the number of |
1046 | * cycles it takes for a fully memory-limited process to flush | |
1047 | * the SMP-local cache. | |
1048 | * | |
1049 | * (For a P5 this pretty much means we will almost always choose |
1050 | * another idle CPU at wakeup time, due to the small |
1051 | * L1 cache; on PIIs it's around 50-100 usecs, depending on |
1052 | * the cache size.) |
1053 | */ | |
1054 | ||
1055 | if (!cpu_khz) { | |
1056 | /* | |
1057 | * this basically disables processor-affinity | |
1058 | * scheduling on SMP without a TSC. | |
1059 | */ | |
1060 | return; | |
1061 | } else { | |
1062 | cachesize = boot_cpu_data.x86_cache_size; | |
1063 | if (cachesize == -1) { | |
1064 | cachesize = 16; /* Pentiums, 2x8kB cache */ | |
1065 | bandwidth = 100; | |
1066 | } | |
1067 | } | |
1068 | } | |
1069 | ||
1070 | /* | |
1071 | * Cycle through the processors sending APIC IPIs to boot each. | |
1072 | */ | |
1073 | ||
1074 | static int boot_cpu_logical_apicid; | |
1075 | /* Where the IO area was mapped on multiquad, always 0 otherwise */ | |
1076 | void *xquad_portio; | |
129f6946 AD |
1077 | #ifdef CONFIG_X86_NUMAQ |
1078 | EXPORT_SYMBOL(xquad_portio); | |
1079 | #endif | |
1da177e4 | 1080 | |
1da177e4 LT |
1081 | static void __init smp_boot_cpus(unsigned int max_cpus) |
1082 | { | |
1083 | int apicid, cpu, bit, kicked; | |
1084 | unsigned long bogosum = 0; | |
1085 | ||
1086 | /* | |
1087 | * Setup boot CPU information | |
1088 | */ | |
1089 | smp_store_cpu_info(0); /* Final full version of the data */ | |
1090 | printk("CPU%d: ", 0); | |
1091 | print_cpu_info(&cpu_data[0]); | |
1092 | ||
1e4c85f9 | 1093 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); |
1da177e4 LT |
1094 | boot_cpu_logical_apicid = logical_smp_processor_id(); |
1095 | x86_cpu_to_apicid[0] = boot_cpu_physical_apicid; | |
1096 | ||
1097 | current_thread_info()->cpu = 0; | |
1098 | smp_tune_scheduling(); | |
1099 | cpus_clear(cpu_sibling_map[0]); | |
1100 | cpu_set(0, cpu_sibling_map[0]); | |
1101 | ||
3dd9d514 AK |
1102 | cpus_clear(cpu_core_map[0]); |
1103 | cpu_set(0, cpu_core_map[0]); | |
1104 | ||
1da177e4 LT |
1105 | /* |
1106 | * If we couldn't find an SMP configuration at boot time, | |
1107 | * get out of here now! | |
1108 | */ | |
1109 | if (!smp_found_config && !acpi_lapic) { | |
1110 | printk(KERN_NOTICE "SMP motherboard not detected.\n"); | |
1e4c85f9 LT |
1111 | smpboot_clear_io_apic_irqs(); |
1112 | phys_cpu_present_map = physid_mask_of_physid(0); | |
1113 | if (APIC_init_uniprocessor()) | |
1114 | printk(KERN_NOTICE "Local APIC not detected." | |
1115 | " Using dummy APIC emulation.\n"); | |
1116 | map_cpu_to_logical_apicid(); | |
1117 | cpu_set(0, cpu_sibling_map[0]); | |
1118 | cpu_set(0, cpu_core_map[0]); | |
1119 | return; | |
1120 | } | |
1121 | ||
1122 | /* | |
1123 | * Should not be necessary because the MP table should list the boot | |
1124 | * CPU too, but we do it for the sake of robustness anyway. | |
1125 | * Makes no sense to do this check in clustered apic mode, so skip it | |
1126 | */ | |
1127 | if (!check_phys_apicid_present(boot_cpu_physical_apicid)) { | |
1128 | printk("weird, boot CPU (#%d) not listed by the BIOS.\n", | |
1129 | boot_cpu_physical_apicid); | |
1130 | physid_set(hard_smp_processor_id(), phys_cpu_present_map); | |
1131 | } | |
1132 | ||
1133 | /* | |
1134 | * If we couldn't find a local APIC, then get out of here now! | |
1135 | */ | |
1136 | if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) { | |
1137 | printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", | |
1138 | boot_cpu_physical_apicid); | |
1139 | printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); | |
1140 | smpboot_clear_io_apic_irqs(); | |
1141 | phys_cpu_present_map = physid_mask_of_physid(0); | |
1142 | cpu_set(0, cpu_sibling_map[0]); | |
1143 | cpu_set(0, cpu_core_map[0]); | |
1da177e4 LT |
1144 | return; |
1145 | } | |
1146 | ||
1e4c85f9 LT |
1147 | verify_local_APIC(); |
1148 | ||
1da177e4 LT |
1149 | /* |
1150 | * If SMP should be disabled, then really disable it! | |
1151 | */ | |
1e4c85f9 LT |
1152 | if (!max_cpus) { |
1153 | smp_found_config = 0; | |
1154 | printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n"); | |
1155 | smpboot_clear_io_apic_irqs(); | |
1156 | phys_cpu_present_map = physid_mask_of_physid(0); | |
1157 | cpu_set(0, cpu_sibling_map[0]); | |
1158 | cpu_set(0, cpu_core_map[0]); | |
1da177e4 LT |
1159 | return; |
1160 | } | |
1161 | ||
1e4c85f9 LT |
1162 | connect_bsp_APIC(); |
1163 | setup_local_APIC(); | |
1164 | map_cpu_to_logical_apicid(); | |
1165 | ||
1166 | ||
1da177e4 LT |
1167 | setup_portio_remap(); |
1168 | ||
1169 | /* | |
1170 | * Scan the CPU present map and fire up the other CPUs via do_boot_cpu | |
1171 | * | |
1172 | * In clustered apic mode, phys_cpu_present_map is constructed thus: |
1173 | * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the | |
1174 | * clustered apic ID. | |
1175 | */ | |
1176 | Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map)); | |
1177 | ||
1178 | kicked = 1; | |
1179 | for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) { | |
1180 | apicid = cpu_present_to_apicid(bit); | |
1181 | /* | |
1182 | * Don't even attempt to start the boot CPU! | |
1183 | */ | |
1184 | if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID)) | |
1185 | continue; | |
1186 | ||
1187 | if (!check_apicid_present(bit)) | |
1188 | continue; | |
1189 | if (max_cpus <= cpucount+1) | |
1190 | continue; | |
1191 | ||
e1367daf | 1192 | if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu)) |
1da177e4 LT |
1193 | printk("CPU #%d not responding - cannot use it.\n", |
1194 | apicid); | |
1195 | else | |
1196 | ++kicked; | |
1197 | } | |
1198 | ||
1199 | /* | |
1200 | * Cleanup possible dangling ends... | |
1201 | */ | |
1202 | smpboot_restore_warm_reset_vector(); | |
1203 | ||
1204 | /* | |
1205 | * Allow the user to impress friends. | |
1206 | */ | |
1207 | Dprintk("Before bogomips.\n"); | |
1208 | for (cpu = 0; cpu < NR_CPUS; cpu++) | |
1209 | if (cpu_isset(cpu, cpu_callout_map)) | |
1210 | bogosum += cpu_data[cpu].loops_per_jiffy; | |
1211 | printk(KERN_INFO | |
1212 | "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", | |
1213 | cpucount+1, | |
1214 | bogosum/(500000/HZ), | |
1215 | (bogosum/(5000/HZ))%100); | |
1216 | ||
1217 | Dprintk("Before bogocount - setting activated=1.\n"); | |
1218 | ||
1219 | if (smp_b_stepping) | |
1220 | printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n"); | |
1221 | ||
1222 | /* | |
1223 | * Don't taint if we are running an SMP kernel on a single |
1224 | * non-MP-approved Athlon. |
1225 | */ | |
1226 | if (tainted & TAINT_UNSAFE_SMP) { | |
1227 | if (cpucount) | |
1228 | printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n"); | |
1229 | else | |
1230 | tainted &= ~TAINT_UNSAFE_SMP; | |
1231 | } | |
1232 | ||
1233 | Dprintk("Boot done.\n"); | |
1234 | ||
1235 | /* | |
1236 | * construct cpu_sibling_map[], so that we can identify sibling CPUs |
1237 | * efficiently. | |
1238 | */ | |
3dd9d514 | 1239 | for (cpu = 0; cpu < NR_CPUS; cpu++) { |
1da177e4 | 1240 | cpus_clear(cpu_sibling_map[cpu]); |
3dd9d514 AK |
1241 | cpus_clear(cpu_core_map[cpu]); |
1242 | } | |
1da177e4 | 1243 | |
d720803a LS |
1244 | cpu_set(0, cpu_sibling_map[0]); |
1245 | cpu_set(0, cpu_core_map[0]); | |
1da177e4 | 1246 | |
1e4c85f9 LT |
1247 | smpboot_setup_io_apic(); |
1248 | ||
1249 | setup_boot_APIC_clock(); | |
1250 | ||
1da177e4 LT |
1251 | /* |
1252 | * Synchronize the TSC with the AP | |
1253 | */ | |
1254 | if (cpu_has_tsc && cpucount && cpu_khz) | |
1255 | synchronize_tsc_bp(); | |
1256 | } | |
1257 | ||
1258 | /* These are wrappers to interface to the new boot process. Someone | |
1259 | who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */ | |
1260 | void __init smp_prepare_cpus(unsigned int max_cpus) | |
1261 | { | |
f3705136 ZM |
1262 | smp_commenced_mask = cpumask_of_cpu(0); |
1263 | cpu_callin_map = cpumask_of_cpu(0); | |
1264 | mb(); | |
1da177e4 LT |
1265 | smp_boot_cpus(max_cpus); |
1266 | } | |
1267 | ||
1268 | void __devinit smp_prepare_boot_cpu(void) | |
1269 | { | |
1270 | cpu_set(smp_processor_id(), cpu_online_map); | |
1271 | cpu_set(smp_processor_id(), cpu_callout_map); | |
e1367daf | 1272 | cpu_set(smp_processor_id(), cpu_present_map); |
4ad8d383 | 1273 | cpu_set(smp_processor_id(), cpu_possible_map); |
e1367daf | 1274 | per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; |
1da177e4 LT |
1275 | } |
1276 | ||
f3705136 | 1277 | #ifdef CONFIG_HOTPLUG_CPU |
e1367daf LS |
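| /* Drop this cpu from every sibling and core map it appears in, clear its |
| * own maps and invalidate its package/core IDs. */ |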
1278 | static void |
1279 | remove_siblinginfo(int cpu) | |
1da177e4 | 1280 | { |
e1367daf LS |
1281 | int sibling; |
1282 | ||
1283 | for_each_cpu_mask(sibling, cpu_sibling_map[cpu]) | |
1284 | cpu_clear(cpu, cpu_sibling_map[sibling]); | |
1285 | for_each_cpu_mask(sibling, cpu_core_map[cpu]) | |
1286 | cpu_clear(cpu, cpu_core_map[sibling]); | |
1287 | cpus_clear(cpu_sibling_map[cpu]); | |
1288 | cpus_clear(cpu_core_map[cpu]); | |
1289 | phys_proc_id[cpu] = BAD_APICID; | |
1290 | cpu_core_id[cpu] = BAD_APICID; | |
f3705136 ZM |
1291 | } |
1292 | ||
1293 | int __cpu_disable(void) | |
1294 | { | |
1295 | cpumask_t map = cpu_online_map; | |
1296 | int cpu = smp_processor_id(); | |
1297 | ||
1298 | /* | |
1299 | * Perhaps use cpufreq to drop frequency, but that could go | |
1300 | * into generic code. | |
1301 | * | |
1302 | * We won't take down the boot processor on i386, because some |
1303 | * interrupts can only be serviced by the BSP. |
1304 | * Especially so if we're not using an IOAPIC. -zwane |
1305 | */ | |
1306 | if (cpu == 0) | |
1307 | return -EBUSY; | |
1308 | ||
1309 | /* We enable the timer again on the exit path of the death loop */ | |
1310 | disable_APIC_timer(); | |
1311 | /* Allow any queued timer interrupts to get serviced */ | |
1312 | local_irq_enable(); | |
1313 | mdelay(1); | |
1314 | local_irq_disable(); | |
1315 | ||
e1367daf LS |
1316 | remove_siblinginfo(cpu); |
1317 | ||
f3705136 ZM |
1318 | cpu_clear(cpu, map); |
1319 | fixup_irqs(map); | |
1320 | /* It's now safe to remove this processor from the online map */ | |
1321 | cpu_clear(cpu, cpu_online_map); | |
1322 | return 0; | |
1323 | } | |
1324 | ||
1325 | void __cpu_die(unsigned int cpu) | |
1326 | { | |
1327 | /* We don't do anything here: idle task is faking death itself. */ | |
1328 | unsigned int i; | |
1329 | ||
1330 | for (i = 0; i < 10; i++) { | |
1331 | /* They ack this in play_dead by setting CPU_DEAD */ | |
e1367daf LS |
1332 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) { |
1333 | printk ("CPU %d is now offline\n", cpu); | |
f3705136 | 1334 | return; |
e1367daf | 1335 | } |
aeb8397b | 1336 | msleep(100); |
1da177e4 | 1337 | } |
f3705136 ZM |
1338 | printk(KERN_ERR "CPU %u didn't die...\n", cpu); |
1339 | } | |
1340 | #else /* ... !CONFIG_HOTPLUG_CPU */ | |
1341 | int __cpu_disable(void) | |
1342 | { | |
1343 | return -ENOSYS; | |
1344 | } | |
1da177e4 | 1345 | |
f3705136 ZM |
1346 | void __cpu_die(unsigned int cpu) |
1347 | { | |
1348 | /* We said "no" in __cpu_disable */ | |
1349 | BUG(); | |
1350 | } | |
1351 | #endif /* CONFIG_HOTPLUG_CPU */ | |
1352 | ||
1353 | int __devinit __cpu_up(unsigned int cpu) | |
1354 | { | |
1da177e4 LT |
1355 | /* In case one didn't come up */ |
1356 | if (!cpu_isset(cpu, cpu_callin_map)) { | |
f3705136 | 1357 | printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu); |
1da177e4 LT |
1358 | local_irq_enable(); |
1359 | return -EIO; | |
1360 | } | |
1361 | ||
1362 | local_irq_enable(); | |
e1367daf | 1363 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; |
1da177e4 LT |
1364 | /* Unleash the CPU! */ |
1365 | cpu_set(cpu, smp_commenced_mask); | |
1366 | while (!cpu_isset(cpu, cpu_online_map)) | |
1367 | mb(); | |
1368 | return 0; | |
1369 | } | |
1370 | ||
1371 | void __init smp_cpus_done(unsigned int max_cpus) | |
1372 | { | |
1373 | #ifdef CONFIG_X86_IO_APIC | |
1374 | setup_ioapic_dest(); | |
1375 | #endif | |
1376 | zap_low_mappings(); | |
e1367daf | 1377 | #ifndef CONFIG_HOTPLUG_CPU |
1da177e4 LT |
1378 | /* |
1379 | * Disable executability of the SMP trampoline: | |
1380 | */ | |
1381 | set_kernel_exec((unsigned long)trampoline_base, trampoline_exec); | |
e1367daf | 1382 | #endif |
1da177e4 LT |
1383 | } |
1384 | ||
1385 | void __init smp_intr_init(void) | |
1386 | { | |
1387 | /* | |
1388 | * IRQ0 must be given a fixed assignment and initialized, | |
1389 | * because it's used before the IO-APIC is set up. | |
1390 | */ | |
1391 | set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]); | |
1392 | ||
1393 | /* | |
1394 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper | |
1395 | * IPI, driven by wakeup. | |
1396 | */ | |
1397 | set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); | |
1398 | ||
1399 | /* IPI for invalidation */ | |
1400 | set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt); | |
1401 | ||
1402 | /* IPI for generic function call */ | |
1403 | set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | |
1404 | } |