Commit | Line | Data |
---|---|---|
4cedb334 GOC |
1 | /* |
2 | * x86 SMP booting functions | |
3 | * | |
4 | * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> | |
5 | * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> | |
6 | * Copyright 2001 Andi Kleen, SuSE Labs. | |
7 | * | |
8 | * Much of the core SMP work is based on previous work by Thomas Radke, to | |
9 | * whom a great many thanks are extended. | |
10 | * | |
11 | * Thanks to Intel for making available several different Pentium, | |
12 | * Pentium Pro and Pentium-II/Xeon MP machines. | |
13 | * Original development of Linux SMP code supported by Caldera. | |
14 | * | |
15 | * This code is released under the GNU General Public License version 2 or | |
16 | * later. | |
17 | * | |
18 | * Fixes | |
19 | * Felix Koop : NR_CPUS used properly | |
20 | * Jose Renau : Handle single CPU case. | |
21 | * Alan Cox : By repeated request 8) - Total BogoMIPS report. | |
22 | * Greg Wright : Fix for kernel stacks panic. | |
23 | * Erich Boleyn : MP v1.4 and additional changes. | |
24 | * Matthias Sattler : Changes for 2.1 kernel map. | |
25 | * Michel Lespinasse : Changes for 2.1 kernel map. | |
26 | * Michael Chastain : Change trampoline.S to gnu as. | |
27 | * Alan Cox : Dumb bug: 'B' step PPro's are fine | |
28 | * Ingo Molnar : Added APIC timers, based on code | |
29 | * from Jose Renau | |
30 | * Ingo Molnar : various cleanups and rewrites | |
31 | * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug. | |
32 | * Maciej W. Rozycki : Bits for genuine 82489DX APICs | |
33 | * Andi Kleen : Changed for SMP boot into long mode. | |
34 | * Martin J. Bligh : Added support for multi-quad systems | |
35 | * Dave Jones : Report invalid combinations of Athlon CPUs. | |
36 | * Rusty Russell : Hacked into shape for new "hotplug" boot process. | |
37 | * Andi Kleen : Converted to new state machine. | |
38 | * Ashok Raj : CPU hotplug support | |
39 | * Glauber Costa : i386 and x86_64 integration | |
40 | */ | |
41 | ||
68a1c3f8 GC |
42 | #include <linux/init.h> |
43 | #include <linux/smp.h> | |
a355352b | 44 | #include <linux/module.h> |
70708a18 | 45 | #include <linux/sched.h> |
69c18c15 | 46 | #include <linux/percpu.h> |
91718e8d | 47 | #include <linux/bootmem.h> |
cb3c8b90 GOC |
48 | #include <linux/err.h> |
49 | #include <linux/nmi.h> | |
69c18c15 | 50 | |
8aef135c | 51 | #include <asm/acpi.h> |
cb3c8b90 | 52 | #include <asm/desc.h> |
69c18c15 GC |
53 | #include <asm/nmi.h> |
54 | #include <asm/irq.h> | |
55 | #include <asm/smp.h> | |
e44b7b75 | 56 | #include <asm/trampoline.h> |
69c18c15 GC |
57 | #include <asm/cpu.h> |
58 | #include <asm/numa.h> | |
cb3c8b90 GOC |
59 | #include <asm/pgtable.h> |
60 | #include <asm/tlbflush.h> | |
61 | #include <asm/mtrr.h> | |
bbc2ff6a | 62 | #include <asm/vmi.h> |
34d05591 | 63 | #include <asm/genapic.h> |
cb3c8b90 | 64 | #include <linux/mc146818rtc.h> |
68a1c3f8 | 65 | |
f6bc4029 | 66 | #include <mach_apic.h> |
cb3c8b90 GOC |
67 | #include <mach_wakecpu.h> |
68 | #include <smpboot_hooks.h> | |
69 | ||
16ecf7a4 | 70 | #ifdef CONFIG_X86_32 |
4cedb334 | 71 | u8 apicid_2_node[MAX_APICID]; |
61165d7a | 72 | static int low_mappings; |
acbb6734 GOC |
73 | #endif |
74 | ||
a8db8453 GOC |
75 | /* State of each CPU */ |
76 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; | |
77 | ||
cb3c8b90 GOC |
78 | /* Store all idle threads; these can be reused instead of creating | |
79 | * new threads. This also avoids complicated thread-destroy functionality | |
80 | * for idle threads. | |
81 | */ | |
82 | #ifdef CONFIG_HOTPLUG_CPU | |
83 | /* | |
84 | * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is | |
85 | * removed after init for !CONFIG_HOTPLUG_CPU. | |
86 | */ | |
87 | static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); | |
88 | #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) | |
89 | #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) | |
90 | #else | |
91 | struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata; | |
92 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) | |
93 | #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) | |
94 | #endif | |
f6bc4029 | 95 | |
a355352b GC |
96 | /* Number of siblings per CPU package */ |
97 | int smp_num_siblings = 1; | |
98 | EXPORT_SYMBOL(smp_num_siblings); | |
99 | ||
100 | /* Last level cache ID of each logical CPU */ | |
101 | DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; | |
102 | ||
103 | /* bitmap of online cpus */ | |
104 | cpumask_t cpu_online_map __read_mostly; | |
105 | EXPORT_SYMBOL(cpu_online_map); | |
106 | ||
107 | cpumask_t cpu_callin_map; | |
108 | cpumask_t cpu_callout_map; | |
109 | cpumask_t cpu_possible_map; | |
110 | EXPORT_SYMBOL(cpu_possible_map); | |
111 | ||
112 | /* representing HT siblings of each logical CPU */ | |
113 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); | |
114 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); | |
115 | ||
116 | /* representing HT and core siblings of each logical CPU */ | |
117 | DEFINE_PER_CPU(cpumask_t, cpu_core_map); | |
118 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); | |
119 | ||
120 | /* Per CPU bogomips and other parameters */ | |
121 | DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); | |
122 | EXPORT_PER_CPU_SYMBOL(cpu_info); | |
768d9505 | 123 | |
cb3c8b90 GOC |
124 | static atomic_t init_deasserted; |
125 | ||
8aef135c GOC |
126 | static int boot_cpu_logical_apicid; |
127 | ||
768d9505 GC |
128 | /* representing cpus for which sibling maps can be computed */ |
129 | static cpumask_t cpu_sibling_setup_map; | |
130 | ||
1d89a7f0 GOC |
131 | /* Set if we find a B stepping CPU */ |
132 | int __cpuinitdata smp_b_stepping; | |
1d89a7f0 | 133 | |
7cc3959e GOC |
134 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) |
135 | ||
136 | /* which logical CPUs are on which nodes */ | |
137 | cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly = | |
138 | { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE }; | |
139 | EXPORT_SYMBOL(node_to_cpumask_map); | |
140 | /* which node each logical CPU is on */ | |
141 | int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 }; | |
142 | EXPORT_SYMBOL(cpu_to_node_map); | |
143 | ||
144 | /* set up a mapping between cpu and node. */ | |
145 | static void map_cpu_to_node(int cpu, int node) | |
146 | { | |
147 | printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node); | |
148 | cpu_set(cpu, node_to_cpumask_map[node]); | |
149 | cpu_to_node_map[cpu] = node; | |
150 | } | |
151 | ||
152 | /* undo a mapping between cpu and node. */ | |
153 | static void unmap_cpu_to_node(int cpu) | |
154 | { | |
155 | int node; | |
156 | ||
157 | printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu); | |
158 | for (node = 0; node < MAX_NUMNODES; node++) | |
159 | cpu_clear(cpu, node_to_cpumask_map[node]); | |
160 | cpu_to_node_map[cpu] = 0; | |
161 | } | |
162 | #else /* !(CONFIG_NUMA && CONFIG_X86_32) */ | |
163 | #define map_cpu_to_node(cpu, node) ({}) | |
164 | #define unmap_cpu_to_node(cpu) ({}) | |
165 | #endif | |
166 | ||
167 | #ifdef CONFIG_X86_32 | |
168 | u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = | |
169 | { [0 ... NR_CPUS-1] = BAD_APICID }; | |
170 | ||
a4928cff | 171 | static void map_cpu_to_logical_apicid(void) |
7cc3959e GOC |
172 | { |
173 | int cpu = smp_processor_id(); | |
174 | int apicid = logical_smp_processor_id(); | |
175 | int node = apicid_to_node(apicid); | |
176 | ||
177 | if (!node_online(node)) | |
178 | node = first_online_node; | |
179 | ||
180 | cpu_2_logical_apicid[cpu] = apicid; | |
181 | map_cpu_to_node(cpu, node); | |
182 | } | |
183 | ||
1481a3dd | 184 | void numa_remove_cpu(int cpu) |
7cc3959e GOC |
185 | { |
186 | cpu_2_logical_apicid[cpu] = BAD_APICID; | |
187 | unmap_cpu_to_node(cpu); | |
188 | } | |
189 | #else | |
7cc3959e GOC |
190 | #define map_cpu_to_logical_apicid() do {} while (0) |
191 | #endif | |
192 | ||
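map_cpu_to_logical_apicid() above keys off logical_smp_processor_id(), i.e. the local APIC's LDR register. A standalone sketch (assuming the flat logical destination mode convention, where each CPU owns one bit of the 8-bit LDR) of why flat mode tops out at 8 CPUs; the def_to_bigsmp check in smp_sanity_check() further down enforces exactly this limit:

```c
#include <stdio.h>

int main(void)
{
	/* Flat logical destination mode: each CPU's logical APIC ID is a
	 * single bit in the 8-bit LDR, so at most 8 CPUs can be addressed
	 * logically. (Illustrative; the kernel programs the LDR itself.) */
	for (int cpu = 0; cpu < 8; cpu++)
		printf("cpu%d -> logical APIC ID %#04x\n", cpu, 1 << cpu);
	return 0;
}
```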
cb3c8b90 GOC |
193 | /* |
194 | * Report back to the Boot Processor. | |
195 | * Running on AP. | |
196 | */ | |
a4928cff | 197 | static void __cpuinit smp_callin(void) |
cb3c8b90 GOC |
198 | { |
199 | int cpuid, phys_id; | |
200 | unsigned long timeout; | |
201 | ||
202 | /* | |
203 | * If woken up by an INIT in an 82489DX configuration | |
204 | * we may get here before an INIT-deassert IPI reaches | |
205 | * our local APIC. We have to wait for the IPI or we'll | |
206 | * lock up on an APIC access. | |
207 | */ | |
208 | wait_for_init_deassert(&init_deasserted); | |
209 | ||
210 | /* | |
211 | * (This works even if the APIC is not enabled.) | |
212 | */ | |
05f2d12c | 213 | phys_id = GET_APIC_ID(read_apic_id()); |
cb3c8b90 GOC |
214 | cpuid = smp_processor_id(); |
215 | if (cpu_isset(cpuid, cpu_callin_map)) { | |
216 | panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__, | |
217 | phys_id, cpuid); | |
218 | } | |
cfc1b9a6 | 219 | pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id); |
cb3c8b90 GOC |
220 | |
221 | /* | |
222 | * STARTUP IPIs are fragile beasts as they might sometimes | |
223 | * trigger some glue motherboard logic. Complete APIC bus | |
224 | * silence for 1 second; this overestimates, by a factor of two, | |
225 | * the time the boot CPU spends sending the up-to-2 STARTUP | |
226 | * IPIs. This should be enough. | |
227 | */ | |
228 | ||
229 | /* | |
230 | * Waiting 2s total for startup (udelay is not yet working) | |
231 | */ | |
232 | timeout = jiffies + 2*HZ; | |
233 | while (time_before(jiffies, timeout)) { | |
234 | /* | |
235 | * Has the boot CPU finished its STARTUP sequence? | |
236 | */ | |
237 | if (cpu_isset(cpuid, cpu_callout_map)) | |
238 | break; | |
239 | cpu_relax(); | |
240 | } | |
241 | ||
242 | if (!time_before(jiffies, timeout)) { | |
243 | panic("%s: CPU%d started up but did not get a callout!\n", | |
244 | __func__, cpuid); | |
245 | } | |
246 | ||
247 | /* | |
248 | * the boot CPU has finished the init stage and is spinning | |
249 | * on callin_map until we finish. We are free to set up this | |
250 | * CPU, first the APIC. (this is probably redundant on most | |
251 | * boards) | |
252 | */ | |
253 | ||
cfc1b9a6 | 254 | pr_debug("CALLIN, before setup_local_APIC().\n"); |
cb3c8b90 GOC |
255 | smp_callin_clear_local_apic(); |
256 | setup_local_APIC(); | |
257 | end_local_APIC_setup(); | |
258 | map_cpu_to_logical_apicid(); | |
259 | ||
260 | /* | |
261 | * Get our bogomips. | |
262 | * | |
263 | * Need to enable IRQs because the calibration can take a while, | |
264 | * and otherwise the NMI watchdog might kill us. | |
265 | */ | |
266 | local_irq_enable(); | |
267 | calibrate_delay(); | |
268 | local_irq_disable(); | |
cfc1b9a6 | 269 | pr_debug("Stack at about %p\n", &cpuid); |
cb3c8b90 GOC |
270 | |
271 | /* | |
272 | * Save our processor parameters | |
273 | */ | |
274 | smp_store_cpu_info(cpuid); | |
275 | ||
276 | /* | |
277 | * Allow the master to continue. | |
278 | */ | |
279 | cpu_set(cpuid, cpu_callin_map); | |
280 | } | |
281 | ||
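smp_callin() is one half of a two-sided handshake: do_boot_cpu() (below) sets the AP's bit in cpu_callout_map and spins on cpu_callin_map, while the AP does the reverse here. A minimal userspace analogue of that protocol, using C11 threads and atomics in place of cpumasks (the real code also enforces the 2s/5s timeouts and panics on failure; those are omitted in this sketch):

```c
#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static atomic_int callout, callin;	/* stand-ins for the two cpumasks */

static int ap(void *arg)		/* plays the role of smp_callin() */
{
	while (!atomic_load(&callout))	/* wait for the BSP's CALLOUT */
		thrd_yield();
	/* ... per-CPU setup (APIC, calibration) would happen here ... */
	atomic_store(&callin, 1);	/* report back: allow the BSP to go on */
	return 0;
}

int main(void)				/* plays the role of do_boot_cpu() */
{
	thrd_t t;

	thrd_create(&t, ap, NULL);
	atomic_store(&callout, 1);	/* allow the AP to start initializing */
	while (!atomic_load(&callin))	/* spin until it calls in */
		thrd_yield();
	puts("AP called in");
	thrd_join(t, NULL);
	return 0;
}
```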
bbc2ff6a GOC |
282 | /* |
283 | * Activate a secondary processor. | |
284 | */ | |
dbe55f47 | 285 | static void __cpuinit start_secondary(void *unused) |
bbc2ff6a GOC |
286 | { |
287 | /* | |
288 | * Don't put *anything* before cpu_init(); SMP booting is so | |
289 | * fragile that we want to limit the things done here to the | |
290 | * bare minimum. | |
291 | */ | |
292 | #ifdef CONFIG_VMI | |
293 | vmi_bringup(); | |
294 | #endif | |
295 | cpu_init(); | |
296 | preempt_disable(); | |
297 | smp_callin(); | |
298 | ||
299 | /* otherwise gcc will move up smp_processor_id before the cpu_init */ | |
300 | barrier(); | |
301 | /* | |
302 | * Check TSC synchronization with the BP: | |
303 | */ | |
304 | check_tsc_sync_target(); | |
305 | ||
306 | if (nmi_watchdog == NMI_IO_APIC) { | |
307 | disable_8259A_irq(0); | |
308 | enable_NMI_through_LVT0(); | |
309 | enable_8259A_irq(0); | |
310 | } | |
311 | ||
61165d7a HD |
312 | #ifdef CONFIG_X86_32 |
313 | while (low_mappings) | |
314 | cpu_relax(); | |
315 | __flush_tlb_all(); | |
316 | #endif | |
317 | ||
bbc2ff6a GOC |
318 | /* This must be done before setting cpu_online_map */ |
319 | set_cpu_sibling_map(raw_smp_processor_id()); | |
320 | wmb(); | |
321 | ||
322 | /* | |
323 | * We need to hold call_lock, so there is no inconsistency | |
324 | * between the time smp_call_function() determines the number of | |
325 | * IPI recipients and the time when the determination is made | |
326 | * as to which cpus receive the IPI. Holding this | |
327 | * lock helps us to not include this cpu in a currently-in-progress | |
328 | * smp_call_function(). | |
d388e5fd EB |
329 | * |
330 | * We need to hold vector_lock so that the set of online cpus | |
331 | * does not change while we are assigning vectors to cpus. Holding | |
332 | * this lock ensures we don't half assign or remove an irq from a cpu. | |
bbc2ff6a | 333 | */ |
3b16cf87 | 334 | ipi_call_lock_irq(); |
d388e5fd EB |
335 | lock_vector_lock(); |
336 | __setup_vector_irq(smp_processor_id()); | |
bbc2ff6a | 337 | cpu_set(smp_processor_id(), cpu_online_map); |
d388e5fd | 338 | unlock_vector_lock(); |
3b16cf87 | 339 | ipi_call_unlock_irq(); |
bbc2ff6a GOC |
340 | per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; |
341 | ||
342 | setup_secondary_clock(); | |
343 | ||
344 | wmb(); | |
345 | cpu_idle(); | |
346 | } | |
347 | ||
1d89a7f0 GOC |
348 | static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c) |
349 | { | |
1d89a7f0 GOC |
350 | /* |
351 | * Mask B, Pentium, but not Pentium MMX | |
352 | */ | |
353 | if (c->x86_vendor == X86_VENDOR_INTEL && | |
354 | c->x86 == 5 && | |
355 | c->x86_mask >= 1 && c->x86_mask <= 4 && | |
356 | c->x86_model <= 3) | |
357 | /* | |
358 | * Remember we have B step Pentia with bugs | |
359 | */ | |
360 | smp_b_stepping = 1; | |
361 | ||
362 | /* | |
363 | * Certain Athlons might work (for various values of 'work') in SMP | |
364 | * but they are not certified as MP capable. | |
365 | */ | |
366 | if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) { | |
367 | ||
368 | if (num_possible_cpus() == 1) | |
369 | goto valid_k7; | |
370 | ||
371 | /* Athlon 660/661 is valid. */ | |
372 | if ((c->x86_model == 6) && ((c->x86_mask == 0) || | |
373 | (c->x86_mask == 1))) | |
374 | goto valid_k7; | |
375 | ||
376 | /* Duron 670 is valid */ | |
377 | if ((c->x86_model == 7) && (c->x86_mask == 0)) | |
378 | goto valid_k7; | |
379 | ||
380 | /* | |
381 | * Athlon 662, Duron 671, and Athlon >model 7 have the capability | |
382 | * bit. It's worth noting that the A5 stepping (662) of some | |
383 | * Athlon XPs has the MP bit set. | |
384 | * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for | |
385 | * more. | |
386 | */ | |
387 | if (((c->x86_model == 6) && (c->x86_mask >= 2)) || | |
388 | ((c->x86_model == 7) && (c->x86_mask >= 1)) || | |
389 | (c->x86_model > 7)) | |
390 | if (cpu_has_mp) | |
391 | goto valid_k7; | |
392 | ||
393 | /* If we get here, not a certified SMP capable AMD system. */ | |
394 | add_taint(TAINT_UNSAFE_SMP); | |
395 | } | |
396 | ||
397 | valid_k7: | |
398 | ; | |
1d89a7f0 GOC |
399 | } |
400 | ||
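The c->x86, c->x86_model and c->x86_mask fields tested above are the family, model and stepping fields of CPUID leaf 1, decoded elsewhere in the kernel's CPU identification code. A standalone userspace sketch of that decoding, following the kernel's extended-family/model convention (illustrative only):

```c
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	unsigned int stepping = eax & 0xf;		/* c->x86_mask  */
	unsigned int model    = (eax >> 4) & 0xf;	/* c->x86_model */
	unsigned int family   = (eax >> 8) & 0xf;	/* c->x86       */

	/* extended fields, per the kernel's convention */
	if (family == 0xf)
		family += (eax >> 20) & 0xff;
	if (family >= 0x6)
		model += ((eax >> 16) & 0xf) << 4;

	printf("family %u model %u stepping %u\n", family, model, stepping);
	return 0;
}
```

For the Athlon check above, family 6 model 6 stepping 2 would be a 662, i.e. an A5-stepping part that may carry the MP capability bit.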
a4928cff | 401 | static void __cpuinit smp_checks(void) |
693d4b8a GOC |
402 | { |
403 | if (smp_b_stepping) | |
404 | printk(KERN_WARNING "WARNING: SMP operation may be unreliable " | |
405 | "with B stepping processors.\n"); | |
406 | ||
407 | /* | |
408 | * Don't taint if we are running an SMP kernel on a single | |
409 | * non-MP-approved Athlon | |
410 | */ | |
411 | if (tainted & TAINT_UNSAFE_SMP) { | |
f68e00a3 | 412 | if (num_online_cpus() > 1) |
693d4b8a GOC |
413 | printk(KERN_INFO "WARNING: This combination of AMD " |
414 | "processors is not suitable for SMP.\n"); | |
415 | else | |
416 | tainted &= ~TAINT_UNSAFE_SMP; | |
417 | } | |
418 | } | |
419 | ||
1d89a7f0 GOC |
420 | /* |
421 | * The bootstrap kernel entry code has set these up. Save them for | |
422 | * a given CPU | |
423 | */ | |
424 | ||
425 | void __cpuinit smp_store_cpu_info(int id) | |
426 | { | |
427 | struct cpuinfo_x86 *c = &cpu_data(id); | |
428 | ||
429 | *c = boot_cpu_data; | |
430 | c->cpu_index = id; | |
431 | if (id != 0) | |
432 | identify_secondary_cpu(c); | |
433 | smp_apply_quirks(c); | |
434 | } | |
435 | ||
436 | ||
768d9505 GC |
437 | void __cpuinit set_cpu_sibling_map(int cpu) |
438 | { | |
439 | int i; | |
440 | struct cpuinfo_x86 *c = &cpu_data(cpu); | |
441 | ||
442 | cpu_set(cpu, cpu_sibling_setup_map); | |
443 | ||
444 | if (smp_num_siblings > 1) { | |
334ef7a7 | 445 | for_each_cpu_mask_nr(i, cpu_sibling_setup_map) { |
768d9505 GC |
446 | if (c->phys_proc_id == cpu_data(i).phys_proc_id && |
447 | c->cpu_core_id == cpu_data(i).cpu_core_id) { | |
448 | cpu_set(i, per_cpu(cpu_sibling_map, cpu)); | |
449 | cpu_set(cpu, per_cpu(cpu_sibling_map, i)); | |
450 | cpu_set(i, per_cpu(cpu_core_map, cpu)); | |
451 | cpu_set(cpu, per_cpu(cpu_core_map, i)); | |
452 | cpu_set(i, c->llc_shared_map); | |
453 | cpu_set(cpu, cpu_data(i).llc_shared_map); | |
454 | } | |
455 | } | |
456 | } else { | |
457 | cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); | |
458 | } | |
459 | ||
460 | cpu_set(cpu, c->llc_shared_map); | |
461 | ||
462 | if (current_cpu_data.x86_max_cores == 1) { | |
463 | per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); | |
464 | c->booted_cores = 1; | |
465 | return; | |
466 | } | |
467 | ||
334ef7a7 | 468 | for_each_cpu_mask_nr(i, cpu_sibling_setup_map) { |
768d9505 GC |
469 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && |
470 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { | |
471 | cpu_set(i, c->llc_shared_map); | |
472 | cpu_set(cpu, cpu_data(i).llc_shared_map); | |
473 | } | |
474 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { | |
475 | cpu_set(i, per_cpu(cpu_core_map, cpu)); | |
476 | cpu_set(cpu, per_cpu(cpu_core_map, i)); | |
477 | /* | |
478 | * Does this new cpu bring up a new core? | |
479 | */ | |
480 | if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) { | |
481 | /* | |
482 | * for each core in package, increment | |
483 | * the booted_cores for this new cpu | |
484 | */ | |
485 | if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) | |
486 | c->booted_cores++; | |
487 | /* | |
488 | * increment the core count for all | |
489 | * the other cpus in this package | |
490 | */ | |
491 | if (i != cpu) | |
492 | cpu_data(i).booted_cores++; | |
493 | } else if (i != cpu && !c->booted_cores) | |
494 | c->booted_cores = cpu_data(i).booted_cores; | |
495 | } | |
496 | } | |
497 | } | |
498 | ||
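The pairwise matching above is easier to see on a toy topology. A standalone sketch with one package, two cores and two HT siblings per core, using plain unsigned masks in place of cpumask_t (llc_shared_map and the booted_cores bookkeeping are left out):

```c
#include <stdio.h>

/* Assumed toy topology; these arrays stand in for
 * cpu_data(i).phys_proc_id and cpu_data(i).cpu_core_id. */
static const int phys_proc_id[4] = { 0, 0, 0, 0 };
static const int cpu_core_id[4]  = { 0, 0, 1, 1 };

int main(void)
{
	unsigned int sibling_map[4] = { 0 }, core_map[4] = { 0 };

	for (int cpu = 0; cpu < 4; cpu++) {
		for (int i = 0; i < 4; i++) {
			if (phys_proc_id[cpu] != phys_proc_id[i])
				continue;
			core_map[cpu] |= 1u << i;	/* same package */
			if (cpu_core_id[cpu] == cpu_core_id[i])
				sibling_map[cpu] |= 1u << i; /* same core */
		}
		printf("cpu%d: siblings %#x cores %#x\n",
		       cpu, sibling_map[cpu], core_map[cpu]);
	}
	return 0;
}
```

This prints siblings 0x3/0x3/0xc/0xc and cores 0xf for every CPU, matching what the function above computes incrementally as each CPU comes up.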
70708a18 GC |
499 | /* maps the cpu to the sched domain representing multi-core */ |
500 | cpumask_t cpu_coregroup_map(int cpu) | |
501 | { | |
502 | struct cpuinfo_x86 *c = &cpu_data(cpu); | |
503 | /* | |
504 | * For perf, we return the last level cache shared map. | |
505 | * And for power savings, we return cpu_core_map | |
506 | */ | |
507 | if (sched_mc_power_savings || sched_smt_power_savings) | |
508 | return per_cpu(cpu_core_map, cpu); | |
509 | else | |
510 | return c->llc_shared_map; | |
511 | } | |
512 | ||
a4928cff | 513 | static void impress_friends(void) |
904541e2 GOC |
514 | { |
515 | int cpu; | |
516 | unsigned long bogosum = 0; | |
517 | /* | |
518 | * Allow the user to impress friends. | |
519 | */ | |
cfc1b9a6 | 520 | pr_debug("Before bogomips.\n"); |
904541e2 GOC |
521 | for_each_possible_cpu(cpu) |
522 | if (cpu_isset(cpu, cpu_callout_map)) | |
523 | bogosum += cpu_data(cpu).loops_per_jiffy; | |
524 | printk(KERN_INFO | |
525 | "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", | |
f68e00a3 | 526 | num_online_cpus(), |
904541e2 GOC |
527 | bogosum/(500000/HZ), |
528 | (bogosum/(5000/HZ))%100); | |
529 | ||
cfc1b9a6 | 530 | pr_debug("Before bogocount - setting activated=1.\n"); |
904541e2 GOC |
531 | } |
532 | ||
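The two odd-looking divisors implement fixed-point formatting: one BogoMIPS is 500000 delay-loop iterations per second, so bogosum/(500000/HZ) yields the integer part and (bogosum/(5000/HZ))%100 the two decimal digits. A worked standalone example (the HZ and loops_per_jiffy values are assumptions for illustration, not taken from this file):

```c
#include <stdio.h>

#define HZ 250			/* assumed CONFIG_HZ value */

int main(void)
{
	/* Two CPUs, each with loops_per_jiffy = 2000000: at HZ=250 that is
	 * 5e8 loops/s = 1000 BogoMIPS per CPU, 2000 total. */
	unsigned long bogosum = 2 * 2000000UL;

	printf("Total of 2 processors activated (%lu.%02lu BogoMIPS).\n",
	       bogosum / (500000 / HZ),		/* integer part: 2000 */
	       (bogosum / (5000 / HZ)) % 100);	/* hundredths:   00   */
	return 0;
}
```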
cb3c8b90 GOC |
533 | static inline void __inquire_remote_apic(int apicid) |
534 | { | |
535 | unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; | |
536 | char *names[] = { "ID", "VERSION", "SPIV" }; | |
537 | int timeout; | |
538 | u32 status; | |
539 | ||
540 | printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid); | |
541 | ||
542 | for (i = 0; i < ARRAY_SIZE(regs); i++) { | |
543 | printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]); | |
544 | ||
545 | /* | |
546 | * Wait for idle. | |
547 | */ | |
548 | status = safe_apic_wait_icr_idle(); | |
549 | if (status) | |
550 | printk(KERN_CONT | |
551 | "a previous APIC delivery may have failed\n"); | |
552 | ||
593f4a78 MR |
553 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid)); |
554 | apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]); | |
cb3c8b90 GOC |
555 | |
556 | timeout = 0; | |
557 | do { | |
558 | udelay(100); | |
559 | status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK; | |
560 | } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000); | |
561 | ||
562 | switch (status) { | |
563 | case APIC_ICR_RR_VALID: | |
564 | status = apic_read(APIC_RRR); | |
565 | printk(KERN_CONT "%08x\n", status); | |
566 | break; | |
567 | default: | |
568 | printk(KERN_CONT "failed\n"); | |
569 | } | |
570 | } | |
571 | } | |
572 | ||
573 | #ifdef WAKE_SECONDARY_VIA_NMI | |
574 | /* | |
575 | * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal | |
576 | * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this | |
577 | * won't ... remember to clear down the APIC, etc. later. | |
578 | */ | |
579 | static int __devinit | |
580 | wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip) | |
581 | { | |
582 | unsigned long send_status, accept_status = 0; | |
583 | int maxlvt; | |
584 | ||
585 | /* Target chip */ | |
593f4a78 | 586 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid)); |
cb3c8b90 GOC |
587 | |
588 | /* Boot on the stack */ | |
589 | /* Kick the second */ | |
593f4a78 | 590 | apic_write(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL); |
cb3c8b90 | 591 | |
cfc1b9a6 | 592 | pr_debug("Waiting for send to finish...\n"); |
cb3c8b90 GOC |
593 | send_status = safe_apic_wait_icr_idle(); |
594 | ||
595 | /* | |
596 | * Give the other CPU some time to accept the IPI. | |
597 | */ | |
598 | udelay(200); | |
cb3c8b90 | 599 | maxlvt = lapic_get_maxlvt(); |
593f4a78 | 600 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
cb3c8b90 | 601 | apic_write(APIC_ESR, 0); |
cb3c8b90 | 602 | accept_status = (apic_read(APIC_ESR) & 0xEF); |
cfc1b9a6 | 603 | pr_debug("NMI sent.\n"); |
cb3c8b90 GOC |
604 | |
605 | if (send_status) | |
606 | printk(KERN_ERR "APIC never delivered???\n"); | |
607 | if (accept_status) | |
608 | printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status); | |
609 | ||
610 | return (send_status | accept_status); | |
611 | } | |
612 | #endif /* WAKE_SECONDARY_VIA_NMI */ | |
613 | ||
cb3c8b90 GOC |
614 | #ifdef WAKE_SECONDARY_VIA_INIT |
615 | static int __devinit | |
616 | wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) | |
617 | { | |
618 | unsigned long send_status, accept_status = 0; | |
619 | int maxlvt, num_starts, j; | |
620 | ||
34d05591 JS |
621 | if (get_uv_system_type() == UV_NON_UNIQUE_APIC) { |
622 | send_status = uv_wakeup_secondary(phys_apicid, start_eip); | |
623 | atomic_set(&init_deasserted, 1); | |
624 | return send_status; | |
625 | } | |
626 | ||
593f4a78 MR |
627 | maxlvt = lapic_get_maxlvt(); |
628 | ||
cb3c8b90 GOC |
629 | /* |
630 | * Be paranoid about clearing APIC errors. | |
631 | */ | |
632 | if (APIC_INTEGRATED(apic_version[phys_apicid])) { | |
593f4a78 MR |
633 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
634 | apic_write(APIC_ESR, 0); | |
cb3c8b90 GOC |
635 | apic_read(APIC_ESR); |
636 | } | |
637 | ||
cfc1b9a6 | 638 | pr_debug("Asserting INIT.\n"); |
cb3c8b90 GOC |
639 | |
640 | /* | |
641 | * Turn INIT on target chip | |
642 | */ | |
593f4a78 | 643 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); |
cb3c8b90 GOC |
644 | |
645 | /* | |
646 | * Send IPI | |
647 | */ | |
593f4a78 MR |
648 | apic_write(APIC_ICR, |
649 | APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT); | |
cb3c8b90 | 650 | |
cfc1b9a6 | 651 | pr_debug("Waiting for send to finish...\n"); |
cb3c8b90 GOC |
652 | send_status = safe_apic_wait_icr_idle(); |
653 | ||
654 | mdelay(10); | |
655 | ||
cfc1b9a6 | 656 | pr_debug("Deasserting INIT.\n"); |
cb3c8b90 GOC |
657 | |
658 | /* Target chip */ | |
593f4a78 | 659 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); |
cb3c8b90 GOC |
660 | |
661 | /* Send IPI */ | |
593f4a78 | 662 | apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT); |
cb3c8b90 | 663 | |
cfc1b9a6 | 664 | pr_debug("Waiting for send to finish...\n"); |
cb3c8b90 GOC |
665 | send_status = safe_apic_wait_icr_idle(); |
666 | ||
667 | mb(); | |
668 | atomic_set(&init_deasserted, 1); | |
669 | ||
670 | /* | |
671 | * Should we send STARTUP IPIs? | |
672 | * | |
673 | * Determine this based on the APIC version. | |
674 | * If we don't have an integrated APIC, don't send the STARTUP IPIs. | |
675 | */ | |
676 | if (APIC_INTEGRATED(apic_version[phys_apicid])) | |
677 | num_starts = 2; | |
678 | else | |
679 | num_starts = 0; | |
680 | ||
681 | /* | |
682 | * Paravirt / VMI wants a startup IPI hook here to set up the | |
683 | * target processor state. | |
684 | */ | |
685 | startup_ipi_hook(phys_apicid, (unsigned long) start_secondary, | |
cb3c8b90 | 686 | (unsigned long)stack_start.sp); |
cb3c8b90 GOC |
687 | |
688 | /* | |
689 | * Run STARTUP IPI loop. | |
690 | */ | |
cfc1b9a6 | 691 | pr_debug("#startup loops: %d.\n", num_starts); |
cb3c8b90 | 692 | |
cb3c8b90 | 693 | for (j = 1; j <= num_starts; j++) { |
cfc1b9a6 | 694 | pr_debug("Sending STARTUP #%d.\n", j); |
593f4a78 MR |
695 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
696 | apic_write(APIC_ESR, 0); | |
cb3c8b90 | 697 | apic_read(APIC_ESR); |
cfc1b9a6 | 698 | pr_debug("After apic_write.\n"); |
cb3c8b90 GOC |
699 | |
700 | /* | |
701 | * STARTUP IPI | |
702 | */ | |
703 | ||
704 | /* Target chip */ | |
593f4a78 | 705 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); |
cb3c8b90 GOC |
706 | |
707 | /* Boot on the stack */ | |
708 | /* Kick the second */ | |
593f4a78 | 709 | apic_write(APIC_ICR, APIC_DM_STARTUP | (start_eip >> 12)); |
cb3c8b90 GOC |
710 | |
711 | /* | |
712 | * Give the other CPU some time to accept the IPI. | |
713 | */ | |
714 | udelay(300); | |
715 | ||
cfc1b9a6 | 716 | pr_debug("Startup point 1.\n"); |
cb3c8b90 | 717 | |
cfc1b9a6 | 718 | pr_debug("Waiting for send to finish...\n"); |
cb3c8b90 GOC |
719 | send_status = safe_apic_wait_icr_idle(); |
720 | ||
721 | /* | |
722 | * Give the other CPU some time to accept the IPI. | |
723 | */ | |
724 | udelay(200); | |
593f4a78 | 725 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
cb3c8b90 | 726 | apic_write(APIC_ESR, 0); |
cb3c8b90 GOC |
727 | accept_status = (apic_read(APIC_ESR) & 0xEF); |
728 | if (send_status || accept_status) | |
729 | break; | |
730 | } | |
cfc1b9a6 | 731 | pr_debug("After Startup.\n"); |
cb3c8b90 GOC |
732 | |
733 | if (send_status) | |
734 | printk(KERN_ERR "APIC never delivered???\n"); | |
735 | if (accept_status) | |
736 | printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status); | |
737 | ||
738 | return (send_status | accept_status); | |
739 | } | |
740 | #endif /* WAKE_SECONDARY_VIA_INIT */ | |
741 | ||
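The `start_eip >> 12` in the STARTUP IPI above is why do_boot_cpu() notes that start_ip "had better be page-aligned": the ICR vector field only carries the 4 KB page number of the trampoline, which therefore must be page-aligned and below 1 MB. A worked standalone example (the trampoline address is an assumption for illustration):

```c
#include <stdio.h>

int main(void)
{
	unsigned long start_eip = 0x9f000;	/* assumed trampoline address */

	unsigned int vector = start_eip >> 12;	/* 8-bit value in the ICR */
	unsigned int cs     = vector << 8;	/* AP starts at CS:IP = VV00:0000 */

	printf("ICR vector %#x -> AP begins in real mode at %04x:0000\n",
	       vector, cs);
	return 0;
}
```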
742 | struct create_idle { | |
743 | struct work_struct work; | |
744 | struct task_struct *idle; | |
745 | struct completion done; | |
746 | int cpu; | |
747 | }; | |
748 | ||
749 | static void __cpuinit do_fork_idle(struct work_struct *work) | |
750 | { | |
751 | struct create_idle *c_idle = | |
752 | container_of(work, struct create_idle, work); | |
753 | ||
754 | c_idle->idle = fork_idle(c_idle->cpu); | |
755 | complete(&c_idle->done); | |
756 | } | |
757 | ||
f307d25e | 758 | #ifdef CONFIG_X86_64 |
d19fbfdf MS |
759 | |
760 | /* __ref because it's safe to call free_bootmem when after_bootmem == 0. */ | |
761 | static void __ref free_bootmem_pda(struct x8664_pda *oldpda) | |
762 | { | |
763 | if (!after_bootmem) | |
764 | free_bootmem((unsigned long)oldpda, sizeof(*oldpda)); | |
765 | } | |
766 | ||
3461b0af MT |
767 | /* |
768 | * Allocate node local memory for the AP pda. | |
769 | * | |
770 | * Must be called after the _cpu_pda pointer table is initialized. | |
771 | */ | |
7c33b1e6 | 772 | int __cpuinit get_local_pda(int cpu) |
3461b0af MT |
773 | { |
774 | struct x8664_pda *oldpda, *newpda; | |
775 | unsigned long size = sizeof(struct x8664_pda); | |
776 | int node = cpu_to_node(cpu); | |
777 | ||
778 | if (cpu_pda(cpu) && !cpu_pda(cpu)->in_bootmem) | |
779 | return 0; | |
780 | ||
781 | oldpda = cpu_pda(cpu); | |
782 | newpda = kmalloc_node(size, GFP_ATOMIC, node); | |
783 | if (!newpda) { | |
784 | printk(KERN_ERR "Could not allocate node local PDA " | |
785 | "for CPU %d on node %d\n", cpu, node); | |
786 | ||
787 | if (oldpda) | |
788 | return 0; /* have a usable pda */ | |
789 | else | |
790 | return -1; | |
791 | } | |
792 | ||
793 | if (oldpda) { | |
794 | memcpy(newpda, oldpda, size); | |
d19fbfdf | 795 | free_bootmem_pda(oldpda); |
3461b0af MT |
796 | } |
797 | ||
798 | newpda->in_bootmem = 0; | |
799 | cpu_pda(cpu) = newpda; | |
800 | return 0; | |
801 | } | |
f307d25e | 802 | #endif /* CONFIG_X86_64 */ |
3461b0af | 803 | |
cb3c8b90 GOC |
804 | static int __cpuinit do_boot_cpu(int apicid, int cpu) |
805 | /* | |
806 | * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad | |
807 | * (i.e. clustered apic addressing mode), this is a LOGICAL apic ID. | |
808 | * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu. | |
809 | */ | |
810 | { | |
811 | unsigned long boot_error = 0; | |
812 | int timeout; | |
813 | unsigned long start_ip; | |
814 | unsigned short nmi_high = 0, nmi_low = 0; | |
815 | struct create_idle c_idle = { | |
816 | .cpu = cpu, | |
817 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), | |
818 | }; | |
819 | INIT_WORK(&c_idle.work, do_fork_idle); | |
cb3c8b90 | 820 | |
a939098a | 821 | #ifdef CONFIG_X86_64 |
cb3c8b90 | 822 | /* Allocate node local memory for AP pdas */ |
3461b0af MT |
823 | if (cpu > 0) { |
824 | boot_error = get_local_pda(cpu); | |
825 | if (boot_error) | |
826 | goto restore_state; | |
827 | /* if can't get pda memory, can't start cpu */ | |
cb3c8b90 GOC |
828 | } |
829 | #endif | |
830 | ||
831 | alternatives_smp_switch(1); | |
832 | ||
833 | c_idle.idle = get_idle_for_cpu(cpu); | |
834 | ||
835 | /* | |
836 | * We can't use kernel_thread since we must avoid | |
837 | * rescheduling the child. | |
838 | */ | |
839 | if (c_idle.idle) { | |
840 | c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *) | |
841 | (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1); | |
842 | init_idle(c_idle.idle, cpu); | |
843 | goto do_rest; | |
844 | } | |
845 | ||
846 | if (!keventd_up() || current_is_keventd()) | |
847 | c_idle.work.func(&c_idle.work); | |
848 | else { | |
849 | schedule_work(&c_idle.work); | |
850 | wait_for_completion(&c_idle.done); | |
851 | } | |
852 | ||
853 | if (IS_ERR(c_idle.idle)) { | |
854 | printk("failed fork for CPU %d\n", cpu); | |
855 | return PTR_ERR(c_idle.idle); | |
856 | } | |
857 | ||
858 | set_idle_for_cpu(cpu, c_idle.idle); | |
859 | do_rest: | |
860 | #ifdef CONFIG_X86_32 | |
861 | per_cpu(current_task, cpu) = c_idle.idle; | |
862 | init_gdt(cpu); | |
cb3c8b90 | 863 | /* Stack for startup_32 can be just as for start_secondary onwards */ |
cb3c8b90 GOC |
864 | irq_ctx_init(cpu); |
865 | #else | |
866 | cpu_pda(cpu)->pcurrent = c_idle.idle; | |
cb3c8b90 GOC |
867 | clear_tsk_thread_flag(c_idle.idle, TIF_FORK); |
868 | #endif | |
a939098a | 869 | early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); |
3e970473 | 870 | initial_code = (unsigned long)start_secondary; |
9cf4f298 | 871 | stack_start.sp = (void *) c_idle.idle->thread.sp; |
cb3c8b90 GOC |
872 | |
873 | /* start_ip had better be page-aligned! */ | |
874 | start_ip = setup_trampoline(); | |
875 | ||
876 | /* So we see what's up */ | |
877 | printk(KERN_INFO "Booting processor %d/%d ip %lx\n", | |
878 | cpu, apicid, start_ip); | |
879 | ||
880 | /* | |
881 | * This grunge runs the startup process for | |
882 | * the targeted processor. | |
883 | */ | |
884 | ||
885 | atomic_set(&init_deasserted, 0); | |
886 | ||
34d05591 | 887 | if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { |
cb3c8b90 | 888 | |
cfc1b9a6 | 889 | pr_debug("Setting warm reset code and vector.\n"); |
cb3c8b90 | 890 | |
34d05591 JS |
891 | store_NMI_vector(&nmi_high, &nmi_low); |
892 | ||
893 | smpboot_setup_warm_reset_vector(start_ip); | |
894 | /* | |
895 | * Be paranoid about clearing APIC errors. | |
896 | */ | |
897 | apic_write(APIC_ESR, 0); | |
898 | apic_read(APIC_ESR); | |
899 | } | |
cb3c8b90 | 900 | |
cb3c8b90 GOC |
901 | /* |
902 | * Starting actual IPI sequence... | |
903 | */ | |
904 | boot_error = wakeup_secondary_cpu(apicid, start_ip); | |
905 | ||
906 | if (!boot_error) { | |
907 | /* | |
908 | * allow APs to start initializing. | |
909 | */ | |
cfc1b9a6 | 910 | pr_debug("Before Callout %d.\n", cpu); |
cb3c8b90 | 911 | cpu_set(cpu, cpu_callout_map); |
cfc1b9a6 | 912 | pr_debug("After Callout %d.\n", cpu); |
cb3c8b90 GOC |
913 | |
914 | /* | |
915 | * Wait 5s total for a response | |
916 | */ | |
917 | for (timeout = 0; timeout < 50000; timeout++) { | |
918 | if (cpu_isset(cpu, cpu_callin_map)) | |
919 | break; /* It has booted */ | |
920 | udelay(100); | |
921 | } | |
922 | ||
923 | if (cpu_isset(cpu, cpu_callin_map)) { | |
924 | /* number CPUs logically, starting from 1 (BSP is 0) */ | |
cfc1b9a6 | 925 | pr_debug("OK.\n"); |
cb3c8b90 GOC |
926 | printk(KERN_INFO "CPU%d: ", cpu); |
927 | print_cpu_info(&cpu_data(cpu)); | |
cfc1b9a6 | 928 | pr_debug("CPU has booted.\n"); |
cb3c8b90 GOC |
929 | } else { |
930 | boot_error = 1; | |
931 | if (*((volatile unsigned char *)trampoline_base) | |
932 | == 0xA5) | |
933 | /* trampoline started but...? */ | |
934 | printk(KERN_ERR "Stuck ??\n"); | |
935 | else | |
936 | /* trampoline code not run */ | |
937 | printk(KERN_ERR "Not responding.\n"); | |
34d05591 JS |
938 | if (get_uv_system_type() != UV_NON_UNIQUE_APIC) |
939 | inquire_remote_apic(apicid); | |
cb3c8b90 GOC |
940 | } |
941 | } | |
6f585e01 | 942 | #ifdef CONFIG_X86_64 |
3461b0af | 943 | restore_state: |
6f585e01 | 944 | #endif |
cb3c8b90 GOC |
945 | if (boot_error) { |
946 | /* Try to put things back the way they were before ... */ | |
23ca4bba | 947 | numa_remove_cpu(cpu); /* was set by numa_add_cpu */ |
cb3c8b90 GOC |
948 | cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */ |
949 | cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ | |
cb3c8b90 GOC |
950 | cpu_clear(cpu, cpu_present_map); |
951 | per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; | |
952 | } | |
953 | ||
954 | /* mark "stuck" area as not stuck */ | |
955 | *((volatile unsigned long *)trampoline_base) = 0; | |
956 | ||
63d38198 AK |
957 | /* |
958 | * Cleanup possible dangling ends... | |
959 | */ | |
960 | smpboot_restore_warm_reset_vector(); | |
961 | ||
cb3c8b90 GOC |
962 | return boot_error; |
963 | } | |
964 | ||
965 | int __cpuinit native_cpu_up(unsigned int cpu) | |
966 | { | |
967 | int apicid = cpu_present_to_apicid(cpu); | |
968 | unsigned long flags; | |
969 | int err; | |
970 | ||
971 | WARN_ON(irqs_disabled()); | |
972 | ||
cfc1b9a6 | 973 | pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu); |
cb3c8b90 GOC |
974 | |
975 | if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid || | |
976 | !physid_isset(apicid, phys_cpu_present_map)) { | |
977 | printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu); | |
978 | return -EINVAL; | |
979 | } | |
980 | ||
981 | /* | |
982 | * Already booted CPU? | |
983 | */ | |
984 | if (cpu_isset(cpu, cpu_callin_map)) { | |
cfc1b9a6 | 985 | pr_debug("do_boot_cpu %d Already started\n", cpu); |
cb3c8b90 GOC |
986 | return -ENOSYS; |
987 | } | |
988 | ||
989 | /* | |
990 | * Save current MTRR state in case it was changed since early boot | |
991 | * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync: | |
992 | */ | |
993 | mtrr_save_state(); | |
994 | ||
995 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; | |
996 | ||
997 | #ifdef CONFIG_X86_32 | |
998 | /* init low mem mapping */ | |
68db065c | 999 | clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY, |
61165d7a | 1000 | min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); |
cb3c8b90 | 1001 | flush_tlb_all(); |
61165d7a | 1002 | low_mappings = 1; |
cb3c8b90 GOC |
1003 | |
1004 | err = do_boot_cpu(apicid, cpu); | |
61165d7a HD |
1005 | |
1006 | zap_low_mappings(); | |
1007 | low_mappings = 0; | |
1008 | #else | |
1009 | err = do_boot_cpu(apicid, cpu); | |
1010 | #endif | |
1011 | if (err) { | |
cfc1b9a6 | 1012 | pr_debug("do_boot_cpu failed %d\n", err); |
61165d7a | 1013 | return -EIO; |
cb3c8b90 GOC |
1014 | } |
1015 | ||
1016 | /* | |
1017 | * Check TSC synchronization with the AP (keep irqs disabled | |
1018 | * while doing so): | |
1019 | */ | |
1020 | local_irq_save(flags); | |
1021 | check_tsc_sync_source(cpu); | |
1022 | local_irq_restore(flags); | |
1023 | ||
7c04e64a | 1024 | while (!cpu_online(cpu)) { |
cb3c8b90 GOC |
1025 | cpu_relax(); |
1026 | touch_nmi_watchdog(); | |
1027 | } | |
1028 | ||
1029 | return 0; | |
1030 | } | |
1031 | ||
8aef135c GOC |
1032 | /* |
1033 | * Fall back to non-SMP mode after errors. | |
1034 | * | |
1035 | * RED-PEN audit/test this more. I bet there is more state messed up here. | |
1036 | */ | |
1037 | static __init void disable_smp(void) | |
1038 | { | |
1039 | cpu_present_map = cpumask_of_cpu(0); | |
1040 | cpu_possible_map = cpumask_of_cpu(0); | |
8aef135c | 1041 | smpboot_clear_io_apic_irqs(); |
0f385d1d | 1042 | |
8aef135c | 1043 | if (smp_found_config) |
b6df1b8b | 1044 | physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); |
8aef135c | 1045 | else |
b6df1b8b | 1046 | physid_set_mask_of_physid(0, &phys_cpu_present_map); |
8aef135c GOC |
1047 | map_cpu_to_logical_apicid(); |
1048 | cpu_set(0, per_cpu(cpu_sibling_map, 0)); | |
1049 | cpu_set(0, per_cpu(cpu_core_map, 0)); | |
1050 | } | |
1051 | ||
1052 | /* | |
1053 | * Various sanity checks. | |
1054 | */ | |
1055 | static int __init smp_sanity_check(unsigned max_cpus) | |
1056 | { | |
ac23d4ee | 1057 | preempt_disable(); |
a58f03b0 YL |
1058 | |
1059 | #if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32) | |
1060 | if (def_to_bigsmp && nr_cpu_ids > 8) { | |
1061 | unsigned int cpu; | |
1062 | unsigned nr; | |
1063 | ||
1064 | printk(KERN_WARNING | |
1065 | "More than 8 CPUs detected - skipping them.\n" | |
1066 | "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n"); | |
1067 | ||
1068 | nr = 0; | |
1069 | for_each_present_cpu(cpu) { | |
1070 | if (nr >= 8) | |
1071 | cpu_clear(cpu, cpu_present_map); | |
1072 | nr++; | |
1073 | } | |
1074 | ||
1075 | nr = 0; | |
1076 | for_each_possible_cpu(cpu) { | |
1077 | if (nr >= 8) | |
1078 | cpu_clear(cpu, cpu_possible_map); | |
1079 | nr++; | |
1080 | } | |
1081 | ||
1082 | nr_cpu_ids = 8; | |
1083 | } | |
1084 | #endif | |
1085 | ||
8aef135c GOC |
1086 | if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { |
1087 | printk(KERN_WARNING "weird, boot CPU (#%d) not listed " | |
1088 | "by the BIOS.\n", hard_smp_processor_id()); | |
1089 | physid_set(hard_smp_processor_id(), phys_cpu_present_map); | |
1090 | } | |
1091 | ||
1092 | /* | |
1093 | * If we couldn't find an SMP configuration at boot time, | |
1094 | * get out of here now! | |
1095 | */ | |
1096 | if (!smp_found_config && !acpi_lapic) { | |
ac23d4ee | 1097 | preempt_enable(); |
8aef135c GOC |
1098 | printk(KERN_NOTICE "SMP motherboard not detected.\n"); |
1099 | disable_smp(); | |
1100 | if (APIC_init_uniprocessor()) | |
1101 | printk(KERN_NOTICE "Local APIC not detected." | |
1102 | " Using dummy APIC emulation.\n"); | |
1103 | return -1; | |
1104 | } | |
1105 | ||
1106 | /* | |
1107 | * Should not be necessary because the MP table should list the boot | |
1108 | * CPU too, but we do it for the sake of robustness anyway. | |
1109 | */ | |
1110 | if (!check_phys_apicid_present(boot_cpu_physical_apicid)) { | |
1111 | printk(KERN_NOTICE | |
1112 | "weird, boot CPU (#%d) not listed by the BIOS.\n", | |
1113 | boot_cpu_physical_apicid); | |
1114 | physid_set(hard_smp_processor_id(), phys_cpu_present_map); | |
1115 | } | |
ac23d4ee | 1116 | preempt_enable(); |
8aef135c GOC |
1117 | |
1118 | /* | |
1119 | * If we couldn't find a local APIC, then get out of here now! | |
1120 | */ | |
1121 | if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && | |
1122 | !cpu_has_apic) { | |
1123 | printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", | |
1124 | boot_cpu_physical_apicid); | |
1125 | printk(KERN_ERR "... forcing use of dummy APIC emulation. " | |
1126 | "(tell your hw vendor)\n"); | |
1127 | smpboot_clear_io_apic(); | |
1128 | return -1; | |
1129 | } | |
1130 | ||
1131 | verify_local_APIC(); | |
1132 | ||
1133 | /* | |
1134 | * If SMP should be disabled, then really disable it! | |
1135 | */ | |
1136 | if (!max_cpus) { | |
73d08e63 | 1137 | printk(KERN_INFO "SMP mode deactivated.\n"); |
8aef135c | 1138 | smpboot_clear_io_apic(); |
d54db1ac MR |
1139 | |
1140 | localise_nmi_watchdog(); | |
1141 | ||
e90955c2 | 1142 | connect_bsp_APIC(); |
e90955c2 JB |
1143 | setup_local_APIC(); |
1144 | end_local_APIC_setup(); | |
8aef135c GOC |
1145 | return -1; |
1146 | } | |
1147 | ||
1148 | return 0; | |
1149 | } | |
1150 | ||
1151 | static void __init smp_cpu_index_default(void) | |
1152 | { | |
1153 | int i; | |
1154 | struct cpuinfo_x86 *c; | |
1155 | ||
7c04e64a | 1156 | for_each_possible_cpu(i) { |
8aef135c GOC |
1157 | c = &cpu_data(i); |
1158 | /* mark all to hotplug */ | |
1159 | c->cpu_index = NR_CPUS; | |
1160 | } | |
1161 | } | |
1162 | ||
1163 | /* | |
1164 | * Prepare for SMP bootup. The MP table or ACPI has been read | |
1165 | * earlier. Just do some sanity checking here and enable APIC mode. | |
1166 | */ | |
1167 | void __init native_smp_prepare_cpus(unsigned int max_cpus) | |
1168 | { | |
deef3250 | 1169 | preempt_disable(); |
8aef135c GOC |
1170 | smp_cpu_index_default(); |
1171 | current_cpu_data = boot_cpu_data; | |
1172 | cpu_callin_map = cpumask_of_cpu(0); | |
1173 | mb(); | |
1174 | /* | |
1175 | * Setup boot CPU information | |
1176 | */ | |
1177 | smp_store_cpu_info(0); /* Final full version of the data */ | |
1178 | boot_cpu_logical_apicid = logical_smp_processor_id(); | |
1179 | current_thread_info()->cpu = 0; /* needed? */ | |
1180 | set_cpu_sibling_map(0); | |
1181 | ||
1182 | if (smp_sanity_check(max_cpus) < 0) { | |
1183 | printk(KERN_INFO "SMP disabled\n"); | |
1184 | disable_smp(); | |
deef3250 | 1185 | goto out; |
8aef135c GOC |
1186 | } |
1187 | ||
ac23d4ee | 1188 | preempt_disable(); |
05f2d12c | 1189 | if (GET_APIC_ID(read_apic_id()) != boot_cpu_physical_apicid) { |
8aef135c | 1190 | panic("Boot APIC ID in local APIC unexpected (%d vs %d)", |
05f2d12c | 1191 | GET_APIC_ID(read_apic_id()), boot_cpu_physical_apicid); |
8aef135c GOC |
1192 | /* Or can we switch back to PIC here? */ |
1193 | } | |
ac23d4ee | 1194 | preempt_enable(); |
8aef135c | 1195 | |
8aef135c | 1196 | connect_bsp_APIC(); |
b5841765 | 1197 | |
8aef135c GOC |
1198 | /* |
1199 | * Switch from PIC to APIC mode. | |
1200 | */ | |
1201 | setup_local_APIC(); | |
1202 | ||
1203 | #ifdef CONFIG_X86_64 | |
1204 | /* | |
1205 | * Enable IO APIC before setting up error vector | |
1206 | */ | |
1207 | if (!skip_ioapic_setup && nr_ioapics) | |
1208 | enable_IO_APIC(); | |
1209 | #endif | |
1210 | end_local_APIC_setup(); | |
1211 | ||
1212 | map_cpu_to_logical_apicid(); | |
1213 | ||
1214 | setup_portio_remap(); | |
1215 | ||
1216 | smpboot_setup_io_apic(); | |
1217 | /* | |
1218 | * Set up local APIC timer on boot CPU. | |
1219 | */ | |
1220 | ||
1221 | printk(KERN_INFO "CPU%d: ", 0); | |
1222 | print_cpu_info(&cpu_data(0)); | |
1223 | setup_boot_clock(); | |
c4bd1fda MS |
1224 | |
1225 | if (is_uv_system()) | |
1226 | uv_system_init(); | |
deef3250 IM |
1227 | out: |
1228 | preempt_enable(); | |
8aef135c | 1229 | } |
a8db8453 GOC |
1230 | /* |
1231 | * Early setup to make printk work. | |
1232 | */ | |
1233 | void __init native_smp_prepare_boot_cpu(void) | |
1234 | { | |
1235 | int me = smp_processor_id(); | |
1236 | #ifdef CONFIG_X86_32 | |
1237 | init_gdt(me); | |
a8db8453 | 1238 | #endif |
a939098a | 1239 | switch_to_new_gdt(); |
a8db8453 GOC |
1240 | /* already set me in cpu_online_map in boot_cpu_init() */ |
1241 | cpu_set(me, cpu_callout_map); | |
1242 | per_cpu(cpu_state, me) = CPU_ONLINE; | |
1243 | } | |
1244 | ||
83f7eb9c GOC |
1245 | void __init native_smp_cpus_done(unsigned int max_cpus) |
1246 | { | |
cfc1b9a6 | 1247 | pr_debug("Boot done.\n"); |
83f7eb9c GOC |
1248 | |
1249 | impress_friends(); | |
1250 | smp_checks(); | |
1251 | #ifdef CONFIG_X86_IO_APIC | |
1252 | setup_ioapic_dest(); | |
1253 | #endif | |
1254 | check_nmi_watchdog(); | |
83f7eb9c GOC |
1255 | } |
1256 | ||
68a1c3f8 | 1257 | #ifdef CONFIG_HOTPLUG_CPU |
2cd9fb71 | 1258 | |
a4928cff | 1259 | static void remove_siblinginfo(int cpu) |
768d9505 GC |
1260 | { |
1261 | int sibling; | |
1262 | struct cpuinfo_x86 *c = &cpu_data(cpu); | |
1263 | ||
334ef7a7 | 1264 | for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) { |
768d9505 GC |
1265 | cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); |
1266 | /* | |
1267 | * The last thread sibling in this cpu core is going down. | |
1268 | */ | |
1269 | if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) | |
1270 | cpu_data(sibling).booted_cores--; | |
1271 | } | |
1272 | ||
334ef7a7 | 1273 | for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu)) |
768d9505 GC |
1274 | cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); |
1275 | cpus_clear(per_cpu(cpu_sibling_map, cpu)); | |
1276 | cpus_clear(per_cpu(cpu_core_map, cpu)); | |
1277 | c->phys_proc_id = 0; | |
1278 | c->cpu_core_id = 0; | |
1279 | cpu_clear(cpu, cpu_sibling_setup_map); | |
1280 | } | |
68a1c3f8 | 1281 | |
c5562fae | 1282 | static int additional_cpus __initdata = -1; |
68a1c3f8 GC |
1283 | |
1284 | static __init int setup_additional_cpus(char *s) | |
1285 | { | |
1286 | return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL; | |
1287 | } | |
1288 | early_param("additional_cpus", setup_additional_cpus); | |
1289 | ||
1290 | /* | |
1291 | * cpu_possible_map should be static; it cannot change as cpus | |
1292 | * are onlined or offlined. The reason is that per-cpu data | |
1293 | * structures are allocated by some modules at init time, and they | |
1294 | * don't expect to do this dynamically on cpu arrival/departure. | |
1295 | * cpu_present_map, on the other hand, can change dynamically. | |
1296 | * When cpu_hotplug is not compiled in, we resort to the current | |
1297 | * behaviour, which is cpu_possible == cpu_present. | |
1298 | * - Ashok Raj | |
1299 | * | |
1300 | * Three ways to find out the number of additional hotplug CPUs: | |
1301 | * - If the BIOS specified disabled CPUs in ACPI/mptables, use that. | |
1302 | * - The user can override it with additional_cpus=NUM. | |
1303 | * - Otherwise don't reserve additional CPUs. | |
1304 | * We do this because additional CPUs waste a lot of memory. | |
1305 | * -AK | |
1306 | */ | |
1307 | __init void prefill_possible_map(void) | |
1308 | { | |
1309 | int i; | |
1310 | int possible; | |
1311 | ||
329513a3 YL |
1312 | /* no processor from mptable or madt */ |
1313 | if (!num_processors) | |
1314 | num_processors = 1; | |
1315 | ||
1316 | #ifdef CONFIG_HOTPLUG_CPU | |
68a1c3f8 GC |
1317 | if (additional_cpus == -1) { |
1318 | if (disabled_cpus > 0) | |
1319 | additional_cpus = disabled_cpus; | |
1320 | else | |
1321 | additional_cpus = 0; | |
1322 | } | |
329513a3 YL |
1323 | #else |
1324 | additional_cpus = 0; | |
1325 | #endif | |
68a1c3f8 GC |
1326 | possible = num_processors + additional_cpus; |
1327 | if (possible > NR_CPUS) | |
1328 | possible = NR_CPUS; | |
1329 | ||
1330 | printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", | |
1331 | possible, max_t(int, possible - num_processors, 0)); | |
1332 | ||
1333 | for (i = 0; i < possible; i++) | |
1334 | cpu_set(i, cpu_possible_map); | |
3461b0af MT |
1335 | |
1336 | nr_cpu_ids = possible; | |
68a1c3f8 | 1337 | } |
69c18c15 GC |
1338 | |
1339 | static void __ref remove_cpu_from_maps(int cpu) | |
1340 | { | |
1341 | cpu_clear(cpu, cpu_online_map); | |
69c18c15 GC |
1342 | cpu_clear(cpu, cpu_callout_map); |
1343 | cpu_clear(cpu, cpu_callin_map); | |
1344 | /* was set by cpu_init() */ | |
29cbeb0e | 1345 | cpu_clear(cpu, cpu_initialized); |
23ca4bba | 1346 | numa_remove_cpu(cpu); |
69c18c15 GC |
1347 | } |
1348 | ||
93be71b6 | 1349 | int native_cpu_disable(void) |
69c18c15 GC |
1350 | { |
1351 | int cpu = smp_processor_id(); | |
1352 | ||
1353 | /* | |
1354 | * Perhaps use cpufreq to drop frequency, but that could go | |
1355 | * into generic code. | |
1356 | * | |
1357 | * We won't take down the boot processor on i386 because some | |
1358 | * interrupts can only be serviced by the BSP. | |
1359 | * Especially so if we're not using an IOAPIC -zwane | |
1360 | */ | |
1361 | if (cpu == 0) | |
1362 | return -EBUSY; | |
1363 | ||
1364 | if (nmi_watchdog == NMI_LOCAL_APIC) | |
1365 | stop_apic_nmi_watchdog(NULL); | |
1366 | clear_local_APIC(); | |
1367 | ||
1368 | /* | |
1369 | * HACK: | |
1370 | * Allow any queued timer interrupts to get serviced. | |
1371 | * This is only a temporary solution until we clean up | |
1372 | * fixup_irqs as we do for IA64. | |
1373 | */ | |
1374 | local_irq_enable(); | |
1375 | mdelay(1); | |
1376 | ||
1377 | local_irq_disable(); | |
1378 | remove_siblinginfo(cpu); | |
1379 | ||
1380 | /* It's now safe to remove this processor from the online map */ | |
d388e5fd | 1381 | lock_vector_lock(); |
69c18c15 | 1382 | remove_cpu_from_maps(cpu); |
d388e5fd | 1383 | unlock_vector_lock(); |
69c18c15 GC |
1384 | fixup_irqs(cpu_online_map); |
1385 | return 0; | |
1386 | } | |
1387 | ||
93be71b6 | 1388 | void native_cpu_die(unsigned int cpu) |
69c18c15 GC |
1389 | { |
1390 | /* We don't do anything here: idle task is faking death itself. */ | |
1391 | unsigned int i; | |
1392 | ||
1393 | for (i = 0; i < 10; i++) { | |
1394 | /* They ack this in play_dead by setting CPU_DEAD */ | |
1395 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) { | |
1396 | printk(KERN_INFO "CPU %d is now offline\n", cpu); | |
1397 | if (1 == num_online_cpus()) | |
1398 | alternatives_smp_switch(0); | |
1399 | return; | |
1400 | } | |
1401 | msleep(100); | |
1402 | } | |
1403 | printk(KERN_ERR "CPU %u didn't die...\n", cpu); | |
1404 | } | |
1405 | #else /* ... !CONFIG_HOTPLUG_CPU */ | |
93be71b6 | 1406 | int native_cpu_disable(void) |
69c18c15 GC |
1407 | { |
1408 | return -ENOSYS; | |
1409 | } | |
1410 | ||
93be71b6 | 1411 | void native_cpu_die(unsigned int cpu) |
69c18c15 GC |
1412 | { |
1413 | /* We said "no" in __cpu_disable */ | |
1414 | BUG(); | |
1415 | } | |
68a1c3f8 | 1416 | #endif |