Commit | Line | Data |
---|---|---|
c767a54b | 1 | /* |
4cedb334 GOC |
2 | * x86 SMP booting functions |
3 | * | |
87c6fe26 | 4 | * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk> |
8f47e163 | 5 | * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com> |
4cedb334 GOC |
6 | * Copyright 2001 Andi Kleen, SuSE Labs. |
7 | * | |
8 | * Much of the core SMP work is based on previous work by Thomas Radke, to | |
9 | * whom a great many thanks are extended. | |
10 | * | |
11 | * Thanks to Intel for making available several different Pentium, | |
12 | * Pentium Pro and Pentium-II/Xeon MP machines. | |
13 | * Original development of Linux SMP code supported by Caldera. | |
14 | * | |
15 | * This code is released under the GNU General Public License version 2 or | |
16 | * later. | |
17 | * | |
18 | * Fixes | |
19 | * Felix Koop : NR_CPUS used properly | |
20 | * Jose Renau : Handle single CPU case. | |
21 | * Alan Cox : By repeated request 8) - Total BogoMIPS report. | |
22 | * Greg Wright : Fix for kernel stacks panic. | |
23 | * Erich Boleyn : MP v1.4 and additional changes. | |
24 | * Matthias Sattler : Changes for 2.1 kernel map. | |
25 | * Michel Lespinasse : Changes for 2.1 kernel map. | |
26 | * Michael Chastain : Change trampoline.S to gnu as. | |
27 | * Alan Cox : Dumb bug: 'B' step PPro's are fine | |
28 | * Ingo Molnar : Added APIC timers, based on code | |
29 | * from Jose Renau | |
30 | * Ingo Molnar : various cleanups and rewrites | |
31 | * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug. | |
32 | * Maciej W. Rozycki : Bits for genuine 82489DX APICs | |
33 | * Andi Kleen : Changed for SMP boot into long mode. | |
34 | * Martin J. Bligh : Added support for multi-quad systems | |
35 | * Dave Jones : Report invalid combinations of Athlon CPUs. | |
36 | * Rusty Russell : Hacked into shape for new "hotplug" boot process. | |
37 | * Andi Kleen : Converted to new state machine. | |
38 | * Ashok Raj : CPU hotplug support | |
39 | * Glauber Costa : i386 and x86_64 integration | |
40 | */ | |
41 | ||
c767a54b JP |
42 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
43 | ||
68a1c3f8 GC |
44 | #include <linux/init.h> |
45 | #include <linux/smp.h> | |
a355352b | 46 | #include <linux/module.h> |
70708a18 | 47 | #include <linux/sched.h> |
69c18c15 | 48 | #include <linux/percpu.h> |
91718e8d | 49 | #include <linux/bootmem.h> |
cb3c8b90 GOC |
50 | #include <linux/err.h> |
51 | #include <linux/nmi.h> | |
69575d38 | 52 | #include <linux/tboot.h> |
35f720c5 | 53 | #include <linux/stackprotector.h> |
5a0e3ad6 | 54 | #include <linux/gfp.h> |
1a022e3f | 55 | #include <linux/cpuidle.h> |
69c18c15 | 56 | |
8aef135c | 57 | #include <asm/acpi.h> |
cb3c8b90 | 58 | #include <asm/desc.h> |
69c18c15 GC |
59 | #include <asm/nmi.h> |
60 | #include <asm/irq.h> | |
07bbc16a | 61 | #include <asm/idle.h> |
48927bbb | 62 | #include <asm/realmode.h> |
69c18c15 GC |
63 | #include <asm/cpu.h> |
64 | #include <asm/numa.h> | |
cb3c8b90 GOC |
65 | #include <asm/pgtable.h> |
66 | #include <asm/tlbflush.h> | |
67 | #include <asm/mtrr.h> | |
ea530692 | 68 | #include <asm/mwait.h> |
7b6aa335 | 69 | #include <asm/apic.h> |
7167d08e | 70 | #include <asm/io_apic.h> |
78f7f1e5 | 71 | #include <asm/fpu/internal.h> |
569712b2 | 72 | #include <asm/setup.h> |
bdbcdd48 | 73 | #include <asm/uv/uv.h> |
cb3c8b90 | 74 | #include <linux/mc146818rtc.h> |
b81bb373 | 75 | #include <asm/i8259.h> |
646e29a1 | 77 | #include <asm/misc.h> |
48927bbb | 78 | |
a355352b GC |
79 | /* Number of siblings per CPU package */ |
80 | int smp_num_siblings = 1; | |
81 | EXPORT_SYMBOL(smp_num_siblings); | |
82 | ||
83 | /* Last level cache ID of each logical CPU */ | |
0816b0f0 | 84 | DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID; |
a355352b | 85 | |
a355352b | 86 | /* representing HT siblings of each logical CPU */ |
0816b0f0 | 87 | DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map); |
a355352b GC |
88 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); |
89 | ||
90 | /* representing HT and core siblings of each logical CPU */ | |
0816b0f0 | 91 | DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map); |
a355352b GC |
92 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); |
93 | ||
0816b0f0 | 94 | DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); |
b3d7336d | 95 | |
a355352b | 96 | /* Per CPU bogomips and other parameters */ |
2c773dd3 | 97 | DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); |
a355352b | 98 | EXPORT_PER_CPU_SYMBOL(cpu_info); |
768d9505 | 99 | |
f77aa308 TG |
100 | static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) |
101 | { | |
102 | unsigned long flags; | |
103 | ||
104 | spin_lock_irqsave(&rtc_lock, flags); | |
105 | CMOS_WRITE(0xa, 0xf); | |
106 | spin_unlock_irqrestore(&rtc_lock, flags); | |
107 | local_flush_tlb(); | |
108 | pr_debug("1.\n"); | |
109 | *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = | |
110 | start_eip >> 4; | |
111 | pr_debug("2.\n"); | |
112 | *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = | |
113 | start_eip & 0xf; | |
114 | pr_debug("3.\n"); | |
115 | } | |
116 | ||
117 | static inline void smpboot_restore_warm_reset_vector(void) | |
118 | { | |
119 | unsigned long flags; | |
120 | ||
121 | /* | |
122 | * Install writable page 0 entry to set BIOS data area. | |
123 | */ | |
124 | local_flush_tlb(); | |
125 | ||
126 | /* | |
127 | * Paranoid: Set warm reset code and vector here back | |
128 | * to default values. | |
129 | */ | |
130 | spin_lock_irqsave(&rtc_lock, flags); | |
131 | CMOS_WRITE(0, 0xf); | |
132 | spin_unlock_irqrestore(&rtc_lock, flags); | |
133 | ||
134 | *((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0; | |
135 | } | |
136 | ||
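The pair of helpers above implements the classic warm-reset handshake: writing 0xA into CMOS register 0xF makes the BIOS take the warm-boot path on the next reset, and that path far-jumps through the real-mode pointer at 40:67, where the trampoline's physical address has been stored as a segment:offset pair. A minimal userspace sketch of that encoding (hypothetical trampoline address; illustration only, not kernel API):

```c
#include <stdio.h>

int main(void)
{
	/* Hypothetical trampoline location, below 1 MiB as required. */
	unsigned int start_eip = 0x9a000;

	unsigned int seg = start_eip >> 4;  /* stored at TRAMPOLINE_PHYS_HIGH (40:67+2) */
	unsigned int off = start_eip & 0xf; /* stored at TRAMPOLINE_PHYS_LOW  (40:67)   */

	/* The BIOS far-jumps through seg:off; real mode resolves it as seg*16+off: */
	unsigned int resolved = (seg << 4) + off;

	printf("%#x -> %04x:%04x -> %#x\n", start_eip, seg, off, resolved);
	return resolved != start_eip;
}
```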
cb3c8b90 | 137 | /* |
30106c17 FY |
138 | * Report back to the Boot Processor during boot time or to the caller processor |
139 | * during CPU online. | |
cb3c8b90 | 140 | */ |
148f9bb8 | 141 | static void smp_callin(void) |
cb3c8b90 GOC |
142 | { |
143 | int cpuid, phys_id; | |
cb3c8b90 GOC |
144 | |
145 | /* | |
146 | * If woken up by an INIT in an 82489DX configuration,
656bba30 LB |
147 | * cpu_callout_mask guarantees we don't get here before |
148 | * an INIT_deassert IPI reaches our local APIC, so it is | |
149 | * now safe to touch our local APIC. | |
cb3c8b90 | 150 | */ |
e1c467e6 | 151 | cpuid = smp_processor_id(); |
cb3c8b90 GOC |
152 | |
153 | /* | |
154 | * (This works even if the APIC is not enabled.) | |
155 | */ | |
4c9961d5 | 156 | phys_id = read_apic_id(); |
cb3c8b90 GOC |
157 | |
158 | /* | |
159 | * the boot CPU has finished the init stage and is spinning | |
160 | * on callin_map until we finish. We are free to set up this | |
161 | * CPU, first the APIC. (this is probably redundant on most | |
162 | * boards) | |
163 | */ | |
05f7e46d | 164 | apic_ap_setup(); |
cb3c8b90 | 165 | |
b565201c JS |
166 | /* |
167 | * Save our processor parameters. Note: this information | |
168 | * is needed for clock calibration. | |
169 | */ | |
170 | smp_store_cpu_info(cpuid); | |
171 | ||
cb3c8b90 GOC |
172 | /* |
173 | * Get our bogomips. | |
b565201c JS |
174 | * Update loops_per_jiffy in cpu_data. Previous call to |
175 | * smp_store_cpu_info() stored a value that is close but not as | |
176 | * accurate as the value just calculated. | |
cb3c8b90 | 177 | */ |
cb3c8b90 | 178 | calibrate_delay(); |
b565201c | 179 | cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy; |
cfc1b9a6 | 180 | pr_debug("Stack at about %p\n", &cpuid); |
cb3c8b90 | 181 | |
5ef428c4 AK |
182 | /* |
183 | * This must be done before setting cpu_online_mask | |
184 | * or calling notify_cpu_starting. | |
185 | */ | |
186 | set_cpu_sibling_map(raw_smp_processor_id()); | |
187 | wmb(); | |
188 | ||
85257024 PZ |
189 | notify_cpu_starting(cpuid); |
190 | ||
cb3c8b90 GOC |
191 | /* |
192 | * Allow the master to continue. | |
193 | */ | |
c2d1cec1 | 194 | cpumask_set_cpu(cpuid, cpu_callin_mask); |
cb3c8b90 GOC |
195 | } |
196 | ||
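smp_callin() is one side of a two-mask rendezvous: the BSP sets the AP's bit in cpu_callout_mask to let it proceed, and the AP answers by setting its bit in cpu_callin_mask (the final cpumask_set_cpu() above). A toy userspace model of that handshake, using C11 atomics in place of cpumasks (compile with -pthread; a sketch only, not the kernel mechanism):

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong callout_mask, callin_mask;

static void *ap_thread(void *arg)
{
	unsigned long bit = 1UL << (long)arg;

	while (!(atomic_load(&callout_mask) & bit))
		;				/* wait for "proceed" from the BSP */
	/* ... cpu_init()/smp_callin() work would happen here ... */
	atomic_fetch_or(&callin_mask, bit);	/* allow the master to continue */
	return NULL;
}

int main(void)
{
	pthread_t ap;
	unsigned long bit = 1UL << 1;		/* pretend this is CPU 1 */

	pthread_create(&ap, NULL, ap_thread, (void *)1L);
	atomic_fetch_or(&callout_mask, bit);	/* BSP: tell the AP to proceed */
	while (!(atomic_load(&callin_mask) & bit))
		;				/* BSP: wait for the call-in */
	pthread_join(ap, NULL);
	puts("AP called in");
	return 0;
}
```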
e1c467e6 FY |
197 | static int cpu0_logical_apicid; |
198 | static int enable_start_cpu0; | |
bbc2ff6a GOC |
199 | /* |
200 | * Activate a secondary processor. | |
201 | */ | |
148f9bb8 | 202 | static void notrace start_secondary(void *unused) |
bbc2ff6a GOC |
203 | { |
204 | /* | |
205 | * Don't put *anything* before cpu_init(); SMP booting is so |
206 | * fragile that we want to limit the things done here to the |
207 | * most necessary things. | |
208 | */ | |
b40827fa | 209 | cpu_init(); |
df156f90 | 210 | x86_cpuinit.early_percpu_clock_init(); |
b40827fa BP |
211 | preempt_disable(); |
212 | smp_callin(); | |
fd89a137 | 213 | |
e1c467e6 FY |
214 | enable_start_cpu0 = 0; |
215 | ||
fd89a137 | 216 | #ifdef CONFIG_X86_32 |
b40827fa | 217 | /* switch away from the initial page table */ |
fd89a137 JR |
218 | load_cr3(swapper_pg_dir); |
219 | __flush_tlb_all(); | |
220 | #endif | |
221 | ||
bbc2ff6a GOC |
222 | /* otherwise gcc will move up smp_processor_id before the cpu_init */ |
223 | barrier(); | |
224 | /* | |
225 | * Check TSC synchronization with the BP: | |
226 | */ | |
227 | check_tsc_sync_target(); | |
228 | ||
bbc2ff6a | 229 | /* |
5a3f75e3 TG |
230 | * Lock vector_lock and initialize the vectors on this cpu |
231 | * before setting the cpu online. We must set it online with | |
232 | * vector_lock held to prevent a concurrent setup/teardown | |
233 | * from seeing a half valid vector space. | |
bbc2ff6a | 234 | */ |
d388e5fd | 235 | lock_vector_lock(); |
5a3f75e3 | 236 | setup_vector_irq(smp_processor_id()); |
c2d1cec1 | 237 | set_cpu_online(smp_processor_id(), true); |
d388e5fd | 238 | unlock_vector_lock(); |
2a442c9c | 239 | cpu_set_state_online(smp_processor_id()); |
78c06176 | 240 | x86_platform.nmi_init(); |
bbc2ff6a | 241 | |
0cefa5b9 MS |
242 | /* enable local interrupts */ |
243 | local_irq_enable(); | |
244 | ||
35f720c5 JP |
245 | /* to prevent fake stack check failure in clock setup */ |
246 | boot_init_stack_canary(); | |
0cefa5b9 | 247 | |
736decac | 248 | x86_cpuinit.setup_percpu_clockev(); |
bbc2ff6a GOC |
249 | |
250 | wmb(); | |
7d1a9417 | 251 | cpu_startup_entry(CPUHP_ONLINE); |
bbc2ff6a GOC |
252 | } |
253 | ||
30106c17 FY |
254 | void __init smp_store_boot_cpu_info(void) |
255 | { | |
256 | int id = 0; /* CPU 0 */ | |
257 | struct cpuinfo_x86 *c = &cpu_data(id); | |
258 | ||
259 | *c = boot_cpu_data; | |
260 | c->cpu_index = id; | |
261 | } | |
262 | ||
1d89a7f0 GOC |
263 | /* |
264 | * The bootstrap kernel entry code has set these up. Save them for | |
265 | * a given CPU | |
266 | */ | |
148f9bb8 | 267 | void smp_store_cpu_info(int id) |
1d89a7f0 GOC |
268 | { |
269 | struct cpuinfo_x86 *c = &cpu_data(id); | |
270 | ||
b3d7336d | 271 | *c = boot_cpu_data; |
1d89a7f0 | 272 | c->cpu_index = id; |
30106c17 FY |
273 | /* |
274 | * During boot time, CPU0 has this setup already. Save the info when | |
275 | * bringing up an AP or an offlined CPU0.
276 | */ | |
277 | identify_secondary_cpu(c); | |
1d89a7f0 GOC |
278 | } |
279 | ||
cebf15eb DH |
280 | static bool |
281 | topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |
282 | { | |
283 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; | |
284 | ||
285 | return (cpu_to_node(cpu1) == cpu_to_node(cpu2)); | |
286 | } | |
287 | ||
148f9bb8 | 288 | static bool |
316ad248 | 289 | topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name) |
d4fbe4f0 | 290 | { |
316ad248 PZ |
291 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
292 | ||
cebf15eb | 293 | return !WARN_ONCE(!topology_same_node(c, o), |
316ad248 PZ |
294 | "sched: CPU #%d's %s-sibling CPU #%d is not on the same node! " |
295 | "[node: %d != %d]. Ignoring dependency.\n", | |
296 | cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2)); | |
297 | } | |
298 | ||
7d79a7bd | 299 | #define link_mask(mfunc, c1, c2) \ |
316ad248 | 300 | do { \ |
7d79a7bd BG |
301 | cpumask_set_cpu((c1), mfunc(c2)); \ |
302 | cpumask_set_cpu((c2), mfunc(c1)); \ | |
316ad248 PZ |
303 | } while (0) |
304 | ||
148f9bb8 | 305 | static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
316ad248 | 306 | { |
193f3fcb | 307 | if (cpu_has_topoext) { |
316ad248 PZ |
308 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
309 | ||
310 | if (c->phys_proc_id == o->phys_proc_id && | |
311 | per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) && | |
312 | c->compute_unit_id == o->compute_unit_id) | |
313 | return topology_sane(c, o, "smt"); | |
314 | ||
315 | } else if (c->phys_proc_id == o->phys_proc_id && | |
316 | c->cpu_core_id == o->cpu_core_id) { | |
317 | return topology_sane(c, o, "smt"); | |
318 | } | |
319 | ||
320 | return false; | |
321 | } | |
322 | ||
148f9bb8 | 323 | static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
316ad248 PZ |
324 | { |
325 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; | |
326 | ||
327 | if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID && | |
328 | per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) | |
329 | return topology_sane(c, o, "llc"); | |
330 | ||
331 | return false; | |
d4fbe4f0 AH |
332 | } |
333 | ||
cebf15eb DH |
334 | /* |
335 | * Unlike the other levels, we do not enforce keeping a | |
336 | * multicore group inside a NUMA node. If this happens, we will | |
337 | * discard the MC level of the topology later. | |
338 | */ | |
339 | static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |
316ad248 | 340 | { |
cebf15eb DH |
341 | if (c->phys_proc_id == o->phys_proc_id) |
342 | return true; | |
316ad248 PZ |
343 | return false; |
344 | } | |
1d89a7f0 | 345 | |
cebf15eb DH |
346 | static struct sched_domain_topology_level numa_inside_package_topology[] = { |
347 | #ifdef CONFIG_SCHED_SMT | |
348 | { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, | |
349 | #endif | |
350 | #ifdef CONFIG_SCHED_MC | |
351 | { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, | |
352 | #endif | |
353 | { NULL, }, | |
354 | }; | |
355 | /* | |
356 | * set_sched_topology() sets the topology internal to a CPU. The | |
357 | * NUMA topologies are layered on top of it to build the full | |
358 | * system topology. | |
359 | * | |
360 | * If NUMA nodes are observed to occur within a CPU package, this | |
361 | * function should be called. It forces the sched domain code to | |
362 | * only use the SMT level for the CPU portion of the topology. | |
363 | * This essentially falls back to relying on NUMA information | |
364 | * from the SRAT table to describe the entire system topology | |
365 | * (except for hyperthreads). | |
366 | */ | |
367 | static void primarily_use_numa_for_topology(void) | |
368 | { | |
369 | set_sched_topology(numa_inside_package_topology); | |
370 | } | |
371 | ||
148f9bb8 | 372 | void set_cpu_sibling_map(int cpu) |
768d9505 | 373 | { |
316ad248 | 374 | bool has_smt = smp_num_siblings > 1; |
b0bc225d | 375 | bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; |
768d9505 | 376 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
316ad248 PZ |
377 | struct cpuinfo_x86 *o; |
378 | int i; | |
768d9505 | 379 | |
c2d1cec1 | 380 | cpumask_set_cpu(cpu, cpu_sibling_setup_mask); |
768d9505 | 381 | |
b0bc225d | 382 | if (!has_mp) { |
7d79a7bd | 383 | cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu)); |
316ad248 | 384 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); |
7d79a7bd | 385 | cpumask_set_cpu(cpu, topology_core_cpumask(cpu)); |
768d9505 GC |
386 | c->booted_cores = 1; |
387 | return; | |
388 | } | |
389 | ||
c2d1cec1 | 390 | for_each_cpu(i, cpu_sibling_setup_mask) { |
316ad248 PZ |
391 | o = &cpu_data(i); |
392 | ||
393 | if ((i == cpu) || (has_smt && match_smt(c, o))) | |
7d79a7bd | 394 | link_mask(topology_sibling_cpumask, cpu, i); |
316ad248 | 395 | |
b0bc225d | 396 | if ((i == cpu) || (has_mp && match_llc(c, o))) |
7d79a7bd | 397 | link_mask(cpu_llc_shared_mask, cpu, i); |
316ad248 | 398 | |
ceb1cbac KB |
399 | } |
400 | ||
401 | /* | |
402 | * This needs a separate iteration over the cpus because we rely on all | |
7d79a7bd | 403 | * topology_sibling_cpumask links to be set-up. |
ceb1cbac KB |
404 | */ |
405 | for_each_cpu(i, cpu_sibling_setup_mask) { | |
406 | o = &cpu_data(i); | |
407 | ||
cebf15eb | 408 | if ((i == cpu) || (has_mp && match_die(c, o))) { |
7d79a7bd | 409 | link_mask(topology_core_cpumask, cpu, i); |
316ad248 | 410 | |
768d9505 GC |
411 | /* |
412 | * Does this new cpu bring up a new core?
413 | */ | |
7d79a7bd BG |
414 | if (cpumask_weight( |
415 | topology_sibling_cpumask(cpu)) == 1) { | |
768d9505 GC |
416 | /* |
417 | * for each core in package, increment | |
418 | * the booted_cores for this new cpu | |
419 | */ | |
7d79a7bd BG |
420 | if (cpumask_first( |
421 | topology_sibling_cpumask(i)) == i) | |
768d9505 GC |
422 | c->booted_cores++; |
423 | /* | |
424 | * increment the core count for all | |
425 | * the other cpus in this package | |
426 | */ | |
427 | if (i != cpu) | |
428 | cpu_data(i).booted_cores++; | |
429 | } else if (i != cpu && !c->booted_cores) | |
430 | c->booted_cores = cpu_data(i).booted_cores; | |
431 | } | |
728e5653 | 432 | if (match_die(c, o) && !topology_same_node(c, o)) |
cebf15eb | 433 | primarily_use_numa_for_topology(); |
768d9505 GC |
434 | } |
435 | } | |
436 | ||
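The link_mask() updates in set_cpu_sibling_map() are symmetric by construction: whenever CPU A is recorded as B's sibling, B is recorded as A's. A toy model with plain 64-bit masks in place of the kernel's cpumask API (hypothetical 4-CPU topology where 0/2 and 1/3 are HT pairs; sketch only):

```c
#include <stdio.h>
#include <stdint.h>

#define NCPUS 4

static uint64_t sibling_mask[NCPUS];	/* stand-in for topology_sibling_cpumask */

/* Mirrors the link_mask() macro above: set each CPU in the other's mask. */
static void link_mask(int c1, int c2)
{
	sibling_mask[c2] |= 1ull << c1;
	sibling_mask[c1] |= 1ull << c2;
}

int main(void)
{
	/* Every CPU links itself (the i == cpu case), plus its HT partner. */
	link_mask(0, 0); link_mask(0, 2); link_mask(2, 2);
	link_mask(1, 1); link_mask(1, 3); link_mask(3, 3);

	for (int i = 0; i < NCPUS; i++)
		printf("cpu%d siblings: %#llx\n", i,
		       (unsigned long long)sibling_mask[i]);
	return 0;
}
```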
70708a18 | 437 | /* maps the cpu to the sched domain representing multi-core */ |
030bb203 | 438 | const struct cpumask *cpu_coregroup_mask(int cpu) |
70708a18 | 439 | { |
9f646389 | 440 | return cpu_llc_shared_mask(cpu); |
030bb203 RR |
441 | } |
442 | ||
a4928cff | 443 | static void impress_friends(void) |
904541e2 GOC |
444 | { |
445 | int cpu; | |
446 | unsigned long bogosum = 0; | |
447 | /* | |
448 | * Allow the user to impress friends. | |
449 | */ | |
c767a54b | 450 | pr_debug("Before bogomips\n"); |
904541e2 | 451 | for_each_possible_cpu(cpu) |
c2d1cec1 | 452 | if (cpumask_test_cpu(cpu, cpu_callout_mask)) |
904541e2 | 453 | bogosum += cpu_data(cpu).loops_per_jiffy; |
c767a54b | 454 | pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n", |
f68e00a3 | 455 | num_online_cpus(), |
904541e2 GOC |
456 | bogosum/(500000/HZ), |
457 | (bogosum/(5000/HZ))%100); | |
458 | ||
c767a54b | 459 | pr_debug("Before bogocount - setting activated=1\n"); |
904541e2 GOC |
460 | } |
461 | ||
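The BogoMIPS arithmetic in impress_friends() follows from one BogoMIPS being 500000 delay-loop iterations per second: with bogosum in loops per jiffy, bogosum/(500000/HZ) is the integer part and (bogosum/(5000/HZ))%100 gives the two decimals. A standalone check with made-up numbers (the HZ value here is a hypothetical config choice):

```c
#include <stdio.h>

#define HZ 250	/* hypothetical kernel tick rate */

int main(void)
{
	/* Two fake CPUs at ~4789.32 BogoMIPS each, summed as loops per jiffy. */
	unsigned long bogosum = 2 * 9578640UL;

	printf("Total of 2 processors activated (%lu.%02lu BogoMIPS)\n",
	       bogosum / (500000 / HZ),		/* integer part */
	       (bogosum / (5000 / HZ)) % 100);	/* two decimals */
	return 0;	/* prints 9578.64 */
}
```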
569712b2 | 462 | void __inquire_remote_apic(int apicid) |
cb3c8b90 GOC |
463 | { |
464 | unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; | |
a6c23905 | 465 | const char * const names[] = { "ID", "VERSION", "SPIV" }; |
cb3c8b90 GOC |
466 | int timeout; |
467 | u32 status; | |
468 | ||
c767a54b | 469 | pr_info("Inquiring remote APIC 0x%x...\n", apicid); |
cb3c8b90 GOC |
470 | |
471 | for (i = 0; i < ARRAY_SIZE(regs); i++) { | |
c767a54b | 472 | pr_info("... APIC 0x%x %s: ", apicid, names[i]); |
cb3c8b90 GOC |
473 | |
474 | /* | |
475 | * Wait for idle. | |
476 | */ | |
477 | status = safe_apic_wait_icr_idle(); | |
478 | if (status) | |
c767a54b | 479 | pr_cont("a previous APIC delivery may have failed\n"); |
cb3c8b90 | 480 | |
1b374e4d | 481 | apic_icr_write(APIC_DM_REMRD | regs[i], apicid); |
cb3c8b90 GOC |
482 | |
483 | timeout = 0; | |
484 | do { | |
485 | udelay(100); | |
486 | status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK; | |
487 | } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000); | |
488 | ||
489 | switch (status) { | |
490 | case APIC_ICR_RR_VALID: | |
491 | status = apic_read(APIC_RRR); | |
c767a54b | 492 | pr_cont("%08x\n", status); |
cb3c8b90 GOC |
493 | break; |
494 | default: | |
c767a54b | 495 | pr_cont("failed\n"); |
cb3c8b90 GOC |
496 | } |
497 | } | |
498 | } | |
499 | ||
d68921f9 LB |
500 | /* |
501 | * The Multiprocessor Specification 1.4 (1997) example code suggests | |
502 | * that there should be a 10ms delay between the BSP asserting INIT | |
503 | * and de-asserting INIT, when starting a remote processor. | |
504 | * But that slows boot and resume on modern processors, which include | |
505 | * many cores and don't require that delay. | |
506 | * | |
507 | * Cmdline "cpu_init_udelay=" is available to override this delay.
1a744cb3 | 508 | * Modern processor families are quirked to remove the delay entirely. |
d68921f9 LB |
509 | */ |
510 | #define UDELAY_10MS_DEFAULT 10000 | |
511 | ||
f1ccd249 | 512 | static unsigned int init_udelay = INT_MAX; |
d68921f9 LB |
513 | |
514 | static int __init cpu_init_udelay(char *str) | |
515 | { | |
516 | get_option(&str, &init_udelay); | |
517 | ||
518 | return 0; | |
519 | } | |
520 | early_param("cpu_init_udelay", cpu_init_udelay); | |
521 | ||
1a744cb3 LB |
522 | static void __init smp_quirk_init_udelay(void) |
523 | { | |
524 | /* if cmdline changed it from default, leave it alone */ | |
f1ccd249 | 525 | if (init_udelay != INT_MAX) |
1a744cb3 LB |
526 | return; |
527 | ||
528 | /* if modern processor, use no delay */ | |
529 | if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) || | |
530 | ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) { |
531 | init_udelay = 0; |
    | return; |
    | } |
f1ccd249 LB |
532 | |
533 | /* else, use legacy delay */
534 | init_udelay = UDELAY_10MS_DEFAULT;
1a744cb3 LB |
535 | } |
536 | ||
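With the early return in the modern-CPU branch, the quirk reduces to a small decision table: an explicit cpu_init_udelay= always wins, modern Intel family-6 and AMD family-0xF+ parts get no delay, and everything else keeps the MP-spec 10 ms. The same logic as a pure function (a sketch with simplified vendor constants, not the kernel's types):

```c
#include <stdio.h>
#include <limits.h>

#define UDELAY_10MS_DEFAULT 10000
enum { VENDOR_INTEL, VENDOR_AMD, VENDOR_OTHER };

/* udelay_arg is INT_MAX when cpu_init_udelay= was not given. */
static unsigned int pick_init_udelay(int vendor, int family, int udelay_arg)
{
	if (udelay_arg != INT_MAX)		/* cmdline overrides everything */
		return udelay_arg;
	if ((vendor == VENDOR_INTEL && family == 6) ||
	    (vendor == VENDOR_AMD && family >= 0xF))
		return 0;			/* modern part: no delay */
	return UDELAY_10MS_DEFAULT;		/* legacy MP-spec delay */
}

int main(void)
{
	printf("%u\n", pick_init_udelay(VENDOR_INTEL, 6, INT_MAX));	/* 0 */
	printf("%u\n", pick_init_udelay(VENDOR_OTHER, 5, INT_MAX));	/* 10000 */
	printf("%u\n", pick_init_udelay(VENDOR_AMD, 0x15, 300));	/* 300 */
	return 0;
}
```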
cb3c8b90 GOC |
537 | /* |
538 | * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal | |
539 | * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this | |
540 | * won't ... remember to clear down the APIC, etc later. | |
541 | */ | |
148f9bb8 | 542 | int |
e1c467e6 | 543 | wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip) |
cb3c8b90 GOC |
544 | { |
545 | unsigned long send_status, accept_status = 0; | |
546 | int maxlvt; | |
547 | ||
548 | /* Target chip */ | |
cb3c8b90 GOC |
549 | /* Boot on the stack */ |
550 | /* Kick the second */ | |
e1c467e6 | 551 | apic_icr_write(APIC_DM_NMI | apic->dest_logical, apicid); |
cb3c8b90 | 552 | |
cfc1b9a6 | 553 | pr_debug("Waiting for send to finish...\n"); |
cb3c8b90 GOC |
554 | send_status = safe_apic_wait_icr_idle(); |
555 | ||
556 | /* | |
557 | * Give the other CPU some time to accept the IPI. | |
558 | */ | |
559 | udelay(200); | |
569712b2 | 560 | if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { |
59ef48a5 CG |
561 | maxlvt = lapic_get_maxlvt(); |
562 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ | |
563 | apic_write(APIC_ESR, 0); | |
564 | accept_status = (apic_read(APIC_ESR) & 0xEF); | |
565 | } | |
c767a54b | 566 | pr_debug("NMI sent\n"); |
cb3c8b90 GOC |
567 | |
568 | if (send_status) | |
c767a54b | 569 | pr_err("APIC never delivered???\n"); |
cb3c8b90 | 570 | if (accept_status) |
c767a54b | 571 | pr_err("APIC delivery error (%lx)\n", accept_status); |
cb3c8b90 GOC |
572 | |
573 | return (send_status | accept_status); | |
574 | } | |
cb3c8b90 | 575 | |
148f9bb8 | 576 | static int |
569712b2 | 577 | wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) |
cb3c8b90 | 578 | { |
f5d6a52f | 579 | unsigned long send_status = 0, accept_status = 0; |
cb3c8b90 GOC |
580 | int maxlvt, num_starts, j; |
581 | ||
593f4a78 MR |
582 | maxlvt = lapic_get_maxlvt(); |
583 | ||
cb3c8b90 GOC |
584 | /* |
585 | * Be paranoid about clearing APIC errors. | |
586 | */ | |
587 | if (APIC_INTEGRATED(apic_version[phys_apicid])) { | |
593f4a78 MR |
588 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
589 | apic_write(APIC_ESR, 0); | |
cb3c8b90 GOC |
590 | apic_read(APIC_ESR); |
591 | } | |
592 | ||
c767a54b | 593 | pr_debug("Asserting INIT\n"); |
cb3c8b90 GOC |
594 | |
595 | /* | |
596 | * Turn INIT on target chip | |
597 | */ | |
cb3c8b90 GOC |
598 | /* |
599 | * Send IPI | |
600 | */ | |
1b374e4d SS |
601 | apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT, |
602 | phys_apicid); | |
cb3c8b90 | 603 | |
cfc1b9a6 | 604 | pr_debug("Waiting for send to finish...\n"); |
cb3c8b90 GOC |
605 | send_status = safe_apic_wait_icr_idle(); |
606 | ||
7cb68598 | 607 | udelay(init_udelay); |
cb3c8b90 | 608 | |
c767a54b | 609 | pr_debug("Deasserting INIT\n"); |
cb3c8b90 GOC |
610 | |
611 | /* Target chip */ | |
cb3c8b90 | 612 | /* Send IPI */ |
1b374e4d | 613 | apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid); |
cb3c8b90 | 614 | |
cfc1b9a6 | 615 | pr_debug("Waiting for send to finish...\n"); |
cb3c8b90 GOC |
616 | send_status = safe_apic_wait_icr_idle(); |
617 | ||
618 | mb(); | |
cb3c8b90 GOC |
619 | |
620 | /* | |
621 | * Should we send STARTUP IPIs ? | |
622 | * | |
623 | * Determine this based on the APIC version. | |
624 | * If we don't have an integrated APIC, don't send the STARTUP IPIs. | |
625 | */ | |
626 | if (APIC_INTEGRATED(apic_version[phys_apicid])) | |
627 | num_starts = 2; | |
628 | else | |
629 | num_starts = 0; | |
630 | ||
631 | /* | |
632 | * Paravirt / VMI wants a startup IPI hook here to set up the | |
633 | * target processor state. | |
634 | */ | |
635 | startup_ipi_hook(phys_apicid, (unsigned long) start_secondary, | |
11d4c3f9 | 636 | stack_start); |
cb3c8b90 GOC |
637 | |
638 | /* | |
639 | * Run STARTUP IPI loop. | |
640 | */ | |
c767a54b | 641 | pr_debug("#startup loops: %d\n", num_starts); |
cb3c8b90 | 642 | |
cb3c8b90 | 643 | for (j = 1; j <= num_starts; j++) { |
c767a54b | 644 | pr_debug("Sending STARTUP #%d\n", j); |
593f4a78 MR |
645 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
646 | apic_write(APIC_ESR, 0); | |
cb3c8b90 | 647 | apic_read(APIC_ESR); |
c767a54b | 648 | pr_debug("After apic_write\n"); |
cb3c8b90 GOC |
649 | |
650 | /* | |
651 | * STARTUP IPI | |
652 | */ | |
653 | ||
654 | /* Target chip */ | |
cb3c8b90 GOC |
655 | /* Boot on the stack */ |
656 | /* Kick the second */ | |
1b374e4d SS |
657 | apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12), |
658 | phys_apicid); | |
cb3c8b90 GOC |
659 | |
660 | /* | |
661 | * Give the other CPU some time to accept the IPI. | |
662 | */ | |
a9bcaa02 LB |
663 | if (init_udelay) |
664 | udelay(300); | |
cb3c8b90 | 665 | |
c767a54b | 666 | pr_debug("Startup point 1\n"); |
cb3c8b90 | 667 | |
cfc1b9a6 | 668 | pr_debug("Waiting for send to finish...\n"); |
cb3c8b90 GOC |
669 | send_status = safe_apic_wait_icr_idle(); |
670 | ||
671 | /* | |
672 | * Give the other CPU some time to accept the IPI. | |
673 | */ | |
a9bcaa02 LB |
674 | if (init_udelay) |
675 | udelay(200); | |
cb3c8b90 | 676 | |
593f4a78 | 677 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
cb3c8b90 | 678 | apic_write(APIC_ESR, 0); |
cb3c8b90 GOC |
679 | accept_status = (apic_read(APIC_ESR) & 0xEF); |
680 | if (send_status || accept_status) | |
681 | break; | |
682 | } | |
c767a54b | 683 | pr_debug("After Startup\n"); |
cb3c8b90 GOC |
684 | |
685 | if (send_status) | |
c767a54b | 686 | pr_err("APIC never delivered???\n"); |
cb3c8b90 | 687 | if (accept_status) |
c767a54b | 688 | pr_err("APIC delivery error (%lx)\n", accept_status); |
cb3c8b90 GOC |
689 | |
690 | return (send_status | accept_status); | |
691 | } | |
cb3c8b90 | 692 | |
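A STARTUP IPI carries no entry pointer, only an 8-bit vector: the target AP begins real-mode execution at vector * 4096, which is why start_eip >> 12 is ORed into the ICR above and why the trampoline must be page-aligned and below 1 MiB. A quick standalone check of that encoding (hypothetical address; sketch only):

```c
#include <stdio.h>

int main(void)
{
	unsigned int start_eip = 0x9a000;	/* hypothetical trampoline address */

	/* Encodable only if 4 KiB-aligned and below 1 MiB: */
	if ((start_eip & 0xfff) || start_eip >= 0x100000)
		return 1;

	unsigned int vector = start_eip >> 12;	/* ORed into APIC_DM_STARTUP */
	unsigned int ap_entry = vector << 12;	/* where the AP starts running */

	printf("STARTUP vector %#x -> AP entry %#x\n", vector, ap_entry);
	return ap_entry != start_eip;
}
```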
a17bce4d BP |
693 | void smp_announce(void) |
694 | { | |
695 | int num_nodes = num_online_nodes(); | |
696 | ||
697 | printk(KERN_INFO "x86: Booted up %d node%s, %d CPUs\n", | |
698 | num_nodes, (num_nodes > 1 ? "s" : ""), num_online_cpus()); | |
699 | } | |
700 | ||
2eaad1fd | 701 | /* reduce the number of lines printed when booting a large cpu count system */ |
148f9bb8 | 702 | static void announce_cpu(int cpu, int apicid) |
2eaad1fd MT |
703 | { |
704 | static int current_node = -1; | |
4adc8b71 | 705 | int node = early_cpu_to_node(cpu); |
a17bce4d | 706 | static int width, node_width; |
646e29a1 BP |
707 | |
708 | if (!width) | |
709 | width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */ | |
2eaad1fd | 710 | |
a17bce4d BP |
711 | if (!node_width) |
712 | node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */ | |
713 | ||
714 | if (cpu == 1) | |
715 | printk(KERN_INFO "x86: Booting SMP configuration:\n"); | |
716 | ||
2eaad1fd MT |
717 | if (system_state == SYSTEM_BOOTING) { |
718 | if (node != current_node) { | |
719 | if (current_node > (-1)) | |
a17bce4d | 720 | pr_cont("\n"); |
2eaad1fd | 721 | current_node = node; |
a17bce4d BP |
722 | |
723 | printk(KERN_INFO ".... node %*s#%d, CPUs: ", | |
724 | node_width - num_digits(node), " ", node); | |
2eaad1fd | 725 | } |
646e29a1 BP |
726 | |
727 | /* Add padding for the BSP */ | |
728 | if (cpu == 1) | |
729 | pr_cont("%*s", width + 1, " "); | |
730 | ||
731 | pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu); | |
732 | ||
2eaad1fd MT |
733 | } else |
734 | pr_info("Booting Node %d Processor %d APIC 0x%x\n", | |
735 | node, cpu, apicid); | |
736 | } | |
737 | ||
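The column alignment in announce_cpu() pads every CPU number to the width of the largest possible one: width is num_digits(num_possible_cpus()) plus one for the '#', and each entry first prints width - num_digits(cpu) spaces. A compact demonstration (num_digits() reimplemented here for illustration; in the kernel it comes from asm/misc.h):

```c
#include <stdio.h>

static int num_digits(int n)
{
	int d = 1;

	while (n >= 10) {
		n /= 10;
		d++;
	}
	return d;
}

int main(void)
{
	int num_possible = 128;				/* hypothetical CPU count */
	int width = num_digits(num_possible) + 1;	/* + '#' sign */

	/* Same format string as announce_cpu(): right-aligned "#N" columns. */
	for (int cpu = 1; cpu <= 4; cpu++)
		printf("%*s#%d", width - num_digits(cpu), " ", cpu);
	printf("\n");
	return 0;
}
```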
e1c467e6 FY |
738 | static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs) |
739 | { | |
740 | int cpu; | |
741 | ||
742 | cpu = smp_processor_id(); | |
743 | if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0) | |
744 | return NMI_HANDLED; | |
745 | ||
746 | return NMI_DONE; | |
747 | } | |
748 | ||
749 | /* | |
750 | * Wake up AP by INIT, INIT, STARTUP sequence. | |
751 | * | |
752 | * Instead of waiting for STARTUP after INITs, BSP will execute the BIOS | |
753 | * boot-strap code which is not a desired behavior for waking up BSP. To | |
754 | * avoid the boot-strap code, wake up CPU0 by NMI instead.
755 | * | |
756 | * This works to wake up soft offlined CPU0 only. If CPU0 is hard offlined | |
757 | * (i.e. physically hot removed and then hot added), NMI won't wake it up. | |
758 | * We'll change this code in the future to wake up hard offlined CPU0 if | |
759 | * a real platform and request are available.
760 | */ | |
148f9bb8 | 761 | static int |
e1c467e6 FY |
762 | wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid, |
763 | int *cpu0_nmi_registered) | |
764 | { | |
765 | int id; | |
766 | int boot_error; | |
767 | ||
ea7bdc65 JK |
768 | preempt_disable(); |
769 | ||
e1c467e6 FY |
770 | /* |
771 | * Wake up AP by INIT, INIT, STARTUP sequence. | |
772 | */ | |
ea7bdc65 JK |
773 | if (cpu) { |
774 | boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip); | |
775 | goto out; | |
776 | } | |
e1c467e6 FY |
777 | |
778 | /* | |
779 | * Wake up BSP by nmi. | |
780 | * | |
781 | * Register a NMI handler to help wake up CPU0. | |
782 | */ | |
783 | boot_error = register_nmi_handler(NMI_LOCAL, | |
784 | wakeup_cpu0_nmi, 0, "wake_cpu0"); | |
785 | ||
786 | if (!boot_error) { | |
787 | enable_start_cpu0 = 1; | |
788 | *cpu0_nmi_registered = 1; | |
789 | if (apic->dest_logical == APIC_DEST_LOGICAL) | |
790 | id = cpu0_logical_apicid; | |
791 | else | |
792 | id = apicid; | |
793 | boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip); | |
794 | } | |
ea7bdc65 JK |
795 | |
796 | out: | |
797 | preempt_enable(); | |
e1c467e6 FY |
798 | |
799 | return boot_error; | |
800 | } | |
801 | ||
3f85483b BO |
802 | void common_cpu_up(unsigned int cpu, struct task_struct *idle) |
803 | { | |
804 | /* Just in case we booted with a single CPU. */ | |
805 | alternatives_enable_smp(); | |
806 | ||
807 | per_cpu(current_task, cpu) = idle; | |
808 | ||
809 | #ifdef CONFIG_X86_32 | |
810 | /* Stack for startup_32 can be just as for start_secondary onwards */ | |
811 | irq_ctx_init(cpu); | |
812 | per_cpu(cpu_current_top_of_stack, cpu) = | |
813 | (unsigned long)task_stack_page(idle) + THREAD_SIZE; | |
814 | #else | |
815 | clear_tsk_thread_flag(idle, TIF_FORK); | |
816 | initial_gs = per_cpu_offset(cpu); | |
817 | #endif | |
3f85483b BO |
818 | } |
819 | ||
cb3c8b90 GOC |
820 | /* |
821 | * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad | |
822 | * (ie clustered apic addressing mode), this is a LOGICAL apic ID. | |
1f5bcabf IM |
823 | * Returns zero if CPU booted OK, else error code from |
824 | * ->wakeup_secondary_cpu. | |
cb3c8b90 | 825 | */ |
148f9bb8 | 826 | static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) |
cb3c8b90 | 827 | { |
48927bbb | 828 | volatile u32 *trampoline_status = |
b429dbf6 | 829 | (volatile u32 *) __va(real_mode_header->trampoline_status); |
48927bbb | 830 | /* start_ip had better be page-aligned! */ |
f37240f1 | 831 | unsigned long start_ip = real_mode_header->trampoline_start; |
48927bbb | 832 | |
cb3c8b90 | 833 | unsigned long boot_error = 0; |
e1c467e6 | 834 | int cpu0_nmi_registered = 0; |
ce4b1b16 | 835 | unsigned long timeout; |
cb3c8b90 | 836 | |
7eb43a6d TG |
837 | idle->thread.sp = (unsigned long) (((struct pt_regs *) |
838 | (THREAD_SIZE + task_stack_page(idle))) - 1); | |
cb3c8b90 | 839 | |
a939098a | 840 | early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); |
3e970473 | 841 | initial_code = (unsigned long)start_secondary; |
7eb43a6d | 842 | stack_start = idle->thread.sp; |
cb3c8b90 | 843 | |
20d5e4a9 ZG |
844 | /* |
845 | * Enable the espfix hack for this CPU | |
846 | */ | |
847 | #ifdef CONFIG_X86_ESPFIX64 | |
848 | init_espfix_ap(cpu); | |
849 | #endif | |
850 | ||
2eaad1fd MT |
851 | /* So we see what's up */ |
852 | announce_cpu(cpu, apicid); | |
cb3c8b90 GOC |
853 | |
854 | /* | |
855 | * This grunge runs the startup process for | |
856 | * the targeted processor. | |
857 | */ | |
858 | ||
34d05591 | 859 | if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { |
cb3c8b90 | 860 | |
cfc1b9a6 | 861 | pr_debug("Setting warm reset code and vector.\n"); |
cb3c8b90 | 862 | |
34d05591 JS |
863 | smpboot_setup_warm_reset_vector(start_ip); |
864 | /* | |
865 | * Be paranoid about clearing APIC errors. | |
db96b0a0 CG |
866 | */ |
867 | if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { | |
868 | apic_write(APIC_ESR, 0); | |
869 | apic_read(APIC_ESR); | |
870 | } | |
34d05591 | 871 | } |
cb3c8b90 | 872 | |
ce4b1b16 IM |
873 | /* |
874 | * AP might wait on cpu_callout_mask in cpu_init() with | |
875 | * cpu_initialized_mask set if previous attempt to online | |
876 | * it timed-out. Clear cpu_initialized_mask so that after | |
877 | * INIT/SIPI it could start with a clean state. | |
878 | */ | |
879 | cpumask_clear_cpu(cpu, cpu_initialized_mask); | |
880 | smp_mb(); | |
881 | ||
cb3c8b90 | 882 | /* |
e1c467e6 FY |
883 | * Wake up a CPU in different cases:
884 | * - Use the method in the APIC driver if it's defined | |
885 | * Otherwise, | |
886 | * - Use an INIT boot APIC message for APs or NMI for BSP. | |
cb3c8b90 | 887 | */ |
1f5bcabf IM |
888 | if (apic->wakeup_secondary_cpu) |
889 | boot_error = apic->wakeup_secondary_cpu(apicid, start_ip); | |
890 | else | |
e1c467e6 FY |
891 | boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid, |
892 | &cpu0_nmi_registered); | |
cb3c8b90 GOC |
893 | |
894 | if (!boot_error) { | |
895 | /* | |
6e38f1e7 | 896 | * Wait 10s total for first sign of life from AP |
cb3c8b90 | 897 | */ |
ce4b1b16 IM |
898 | boot_error = -1; |
899 | timeout = jiffies + 10*HZ; | |
900 | while (time_before(jiffies, timeout)) { | |
901 | if (cpumask_test_cpu(cpu, cpu_initialized_mask)) { | |
902 | /* | |
903 | * Tell AP to proceed with initialization | |
904 | */ | |
905 | cpumask_set_cpu(cpu, cpu_callout_mask); | |
906 | boot_error = 0; | |
907 | break; | |
908 | } | |
ce4b1b16 IM |
909 | schedule(); |
910 | } | |
911 | } | |
cb3c8b90 | 912 | |
ce4b1b16 | 913 | if (!boot_error) { |
cb3c8b90 | 914 | /* |
ce4b1b16 | 915 | * Wait till the AP completes its initialization
cb3c8b90 | 916 | */ |
ce4b1b16 | 917 | while (!cpumask_test_cpu(cpu, cpu_callin_mask)) { |
68f202e4 SS |
918 | /* |
919 | * Allow other tasks to run while we wait for the | |
920 | * AP to come online. This also gives a chance | |
921 | * for the MTRR work(triggered by the AP coming online) | |
922 | * to be completed in the stop machine context. | |
923 | */ | |
924 | schedule(); | |
cb3c8b90 | 925 | } |
cb3c8b90 GOC |
926 | } |
927 | ||
928 | /* mark "stuck" area as not stuck */ | |
48927bbb | 929 | *trampoline_status = 0; |
cb3c8b90 | 930 | |
02421f98 YL |
931 | if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { |
932 | /* | |
933 | * Cleanup possible dangling ends... | |
934 | */ | |
935 | smpboot_restore_warm_reset_vector(); | |
936 | } | |
e1c467e6 FY |
937 | /* |
938 | * Clean up the nmi handler. Do this after the callin and callout sync | |
939 | * to avoid impact of possible long unregister time. | |
940 | */ | |
941 | if (cpu0_nmi_registered) | |
942 | unregister_nmi_handler(NMI_LOCAL, "wake_cpu0"); | |
943 | ||
cb3c8b90 GOC |
944 | return boot_error; |
945 | } | |
946 | ||
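The ten-second bring-up wait in do_boot_cpu() is jiffies-based, and time_before() stays correct even if the jiffies counter wraps, because it compares the signed difference rather than the raw values. A self-contained illustration of the idiom (simplified from include/linux/jiffies.h):

```c
#include <stdio.h>

/* Wraparound-safe comparison, in the style of the kernel's time_before(). */
#define time_before(a, b) ((long)((a) - (b)) < 0)

int main(void)
{
	unsigned long jiffies = ~0UL - 5;	/* just before the wrap */
	unsigned long timeout = jiffies + 10;	/* wraps past zero */

	for (int i = 0; i < 12; i++, jiffies++)
		printf("jiffies=%lx before timeout=%lx? %d\n",
		       jiffies, timeout, time_before(jiffies, timeout));
	return 0;	/* stays 1 until jiffies passes timeout, despite the wrap */
}
```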
148f9bb8 | 947 | int native_cpu_up(unsigned int cpu, struct task_struct *tidle) |
cb3c8b90 | 948 | { |
a21769a4 | 949 | int apicid = apic->cpu_present_to_apicid(cpu); |
cb3c8b90 GOC |
950 | unsigned long flags; |
951 | int err; | |
952 | ||
953 | WARN_ON(irqs_disabled()); | |
954 | ||
cfc1b9a6 | 955 | pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu); |
cb3c8b90 | 956 | |
30106c17 | 957 | if (apicid == BAD_APICID || |
c284b42a | 958 | !physid_isset(apicid, phys_cpu_present_map) || |
fa63030e | 959 | !apic->apic_id_valid(apicid)) { |
c767a54b | 960 | pr_err("%s: bad cpu %d\n", __func__, cpu); |
cb3c8b90 GOC |
961 | return -EINVAL; |
962 | } | |
963 | ||
964 | /* | |
965 | * Already booted CPU? | |
966 | */ | |
c2d1cec1 | 967 | if (cpumask_test_cpu(cpu, cpu_callin_mask)) { |
cfc1b9a6 | 968 | pr_debug("do_boot_cpu %d Already started\n", cpu); |
cb3c8b90 GOC |
969 | return -ENOSYS; |
970 | } | |
971 | ||
972 | /* | |
973 | * Save current MTRR state in case it was changed since early boot | |
974 | * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync: | |
975 | */ | |
976 | mtrr_save_state(); | |
977 | ||
2a442c9c PM |
978 | /* x86 CPUs take themselves offline, so delayed offline is OK. */ |
979 | err = cpu_check_up_prepare(cpu); | |
980 | if (err && err != -EBUSY) | |
981 | return err; | |
cb3c8b90 | 982 | |
644c1541 VP |
983 | /* the FPU context is blank, nobody can own it */ |
984 | __cpu_disable_lazy_restore(cpu); | |
985 | ||
3f85483b BO |
986 | common_cpu_up(cpu, tidle); |
987 | ||
ce0d3c0a TG |
988 | /* |
989 | * We have to walk the irq descriptors to setup the vector | |
990 | * space for the cpu which comes online. Prevent irq | |
991 | * alloc/free across the bringup. | |
992 | */ | |
993 | irq_lock_sparse(); | |
994 | ||
7eb43a6d | 995 | err = do_boot_cpu(apicid, cpu, tidle); |
ce0d3c0a | 996 | |
61165d7a | 997 | if (err) { |
ce0d3c0a | 998 | irq_unlock_sparse(); |
feef1e8e | 999 | pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu); |
61165d7a | 1000 | return -EIO; |
cb3c8b90 GOC |
1001 | } |
1002 | ||
1003 | /* | |
1004 | * Check TSC synchronization with the AP (keep irqs disabled | |
1005 | * while doing so): | |
1006 | */ | |
1007 | local_irq_save(flags); | |
1008 | check_tsc_sync_source(cpu); | |
1009 | local_irq_restore(flags); | |
1010 | ||
7c04e64a | 1011 | while (!cpu_online(cpu)) { |
cb3c8b90 GOC |
1012 | cpu_relax(); |
1013 | touch_nmi_watchdog(); | |
1014 | } | |
1015 | ||
ce0d3c0a TG |
1016 | irq_unlock_sparse(); |
1017 | ||
cb3c8b90 GOC |
1018 | return 0; |
1019 | } | |
1020 | ||
7167d08e HK |
1021 | /** |
1022 | * arch_disable_smp_support() - disables SMP support for x86 at runtime | |
1023 | */ | |
1024 | void arch_disable_smp_support(void) | |
1025 | { | |
1026 | disable_ioapic_support(); | |
1027 | } | |
1028 | ||
8aef135c GOC |
1029 | /* |
1030 | * Fall back to non SMP mode after errors. | |
1031 | * | |
1032 | * RED-PEN audit/test this more. I bet there is more state messed up here. | |
1033 | */ | |
1034 | static __init void disable_smp(void) | |
1035 | { | |
613c25ef TG |
1036 | pr_info("SMP disabled\n"); |
1037 | ||
ef4c59a4 TG |
1038 | disable_ioapic_support(); |
1039 | ||
4f062896 RR |
1040 | init_cpu_present(cpumask_of(0)); |
1041 | init_cpu_possible(cpumask_of(0)); | |
0f385d1d | 1042 | |
8aef135c | 1043 | if (smp_found_config) |
b6df1b8b | 1044 | physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); |
8aef135c | 1045 | else |
b6df1b8b | 1046 | physid_set_mask_of_physid(0, &phys_cpu_present_map); |
7d79a7bd BG |
1047 | cpumask_set_cpu(0, topology_sibling_cpumask(0)); |
1048 | cpumask_set_cpu(0, topology_core_cpumask(0)); | |
8aef135c GOC |
1049 | } |
1050 | ||
613c25ef TG |
1051 | enum { |
1052 | SMP_OK, | |
1053 | SMP_NO_CONFIG, | |
1054 | SMP_NO_APIC, | |
1055 | SMP_FORCE_UP, | |
1056 | }; | |
1057 | ||
8aef135c GOC |
1058 | /* |
1059 | * Various sanity checks. | |
1060 | */ | |
1061 | static int __init smp_sanity_check(unsigned max_cpus) | |
1062 | { | |
ac23d4ee | 1063 | preempt_disable(); |
a58f03b0 | 1064 | |
1ff2f20d | 1065 | #if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32) |
a58f03b0 YL |
1066 | if (def_to_bigsmp && nr_cpu_ids > 8) { |
1067 | unsigned int cpu; | |
1068 | unsigned nr; | |
1069 | ||
c767a54b JP |
1070 | pr_warn("More than 8 CPUs detected - skipping them\n" |
1071 | "Use CONFIG_X86_BIGSMP\n"); | |
a58f03b0 YL |
1072 | |
1073 | nr = 0; | |
1074 | for_each_present_cpu(cpu) { | |
1075 | if (nr >= 8) | |
c2d1cec1 | 1076 | set_cpu_present(cpu, false); |
a58f03b0 YL |
1077 | nr++; |
1078 | } | |
1079 | ||
1080 | nr = 0; | |
1081 | for_each_possible_cpu(cpu) { | |
1082 | if (nr >= 8) | |
c2d1cec1 | 1083 | set_cpu_possible(cpu, false); |
a58f03b0 YL |
1084 | nr++; |
1085 | } | |
1086 | ||
1087 | nr_cpu_ids = 8; | |
1088 | } | |
1089 | #endif | |
1090 | ||
8aef135c | 1091 | if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { |
c767a54b | 1092 | pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n", |
55c395b4 MT |
1093 | hard_smp_processor_id()); |
1094 | ||
8aef135c GOC |
1095 | physid_set(hard_smp_processor_id(), phys_cpu_present_map); |
1096 | } | |
1097 | ||
1098 | /* | |
1099 | * If we couldn't find an SMP configuration at boot time, | |
1100 | * get out of here now! | |
1101 | */ | |
1102 | if (!smp_found_config && !acpi_lapic) { | |
ac23d4ee | 1103 | preempt_enable(); |
c767a54b | 1104 | pr_notice("SMP motherboard not detected\n"); |
613c25ef | 1105 | return SMP_NO_CONFIG; |
8aef135c GOC |
1106 | } |
1107 | ||
1108 | /* | |
1109 | * Should not be necessary because the MP table should list the boot | |
1110 | * CPU too, but we do it for the sake of robustness anyway. | |
1111 | */ | |
a27a6210 | 1112 | if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) { |
c767a54b JP |
1113 | pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n", |
1114 | boot_cpu_physical_apicid); | |
8aef135c GOC |
1115 | physid_set(hard_smp_processor_id(), phys_cpu_present_map); |
1116 | } | |
ac23d4ee | 1117 | preempt_enable(); |
8aef135c GOC |
1118 | |
1119 | /* | |
1120 | * If we couldn't find a local APIC, then get out of here now! | |
1121 | */ | |
1122 | if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && | |
1123 | !cpu_has_apic) { | |
103428e5 CG |
1124 | if (!disable_apic) { |
1125 | pr_err("BIOS bug, local APIC #%d not detected!...\n", | |
1126 | boot_cpu_physical_apicid); | |
c767a54b | 1127 | pr_err("... forcing use of dummy APIC emulation (tell your hw vendor)\n"); |
103428e5 | 1128 | } |
613c25ef | 1129 | return SMP_NO_APIC; |
8aef135c GOC |
1130 | } |
1131 | ||
8aef135c GOC |
1132 | /* |
1133 | * If SMP should be disabled, then really disable it! | |
1134 | */ | |
1135 | if (!max_cpus) { | |
c767a54b | 1136 | pr_info("SMP mode deactivated\n"); |
613c25ef | 1137 | return SMP_FORCE_UP; |
8aef135c GOC |
1138 | } |
1139 | ||
613c25ef | 1140 | return SMP_OK; |
8aef135c GOC |
1141 | } |
1142 | ||
1143 | static void __init smp_cpu_index_default(void) | |
1144 | { | |
1145 | int i; | |
1146 | struct cpuinfo_x86 *c; | |
1147 | ||
7c04e64a | 1148 | for_each_possible_cpu(i) { |
8aef135c GOC |
1149 | c = &cpu_data(i); |
1150 | /* mark all to hotplug */ | |
9628937d | 1151 | c->cpu_index = nr_cpu_ids; |
8aef135c GOC |
1152 | } |
1153 | } | |
1154 | ||
1155 | /* | |
1156 | * Prepare for SMP bootup. The MP table or ACPI has been read | |
1157 | * earlier. Just do some sanity checking here and enable APIC mode. | |
1158 | */ | |
1159 | void __init native_smp_prepare_cpus(unsigned int max_cpus) | |
1160 | { | |
7ad728f9 RR |
1161 | unsigned int i; |
1162 | ||
8aef135c | 1163 | smp_cpu_index_default(); |
792363d2 | 1164 | |
8aef135c GOC |
1165 | /* |
1166 | * Setup boot CPU information | |
1167 | */ | |
30106c17 | 1168 | smp_store_boot_cpu_info(); /* Final full version of the data */ |
792363d2 YL |
1169 | cpumask_copy(cpu_callin_mask, cpumask_of(0)); |
1170 | mb(); | |
bd22a2f1 | 1171 | |
8aef135c | 1172 | current_thread_info()->cpu = 0; /* needed? */ |
7ad728f9 | 1173 | for_each_possible_cpu(i) { |
79f55997 LZ |
1174 | zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); |
1175 | zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); | |
b3d7336d | 1176 | zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL); |
7ad728f9 | 1177 | } |
8aef135c GOC |
1178 | set_cpu_sibling_map(0); |
1179 | ||
613c25ef TG |
1180 | switch (smp_sanity_check(max_cpus)) { |
1181 | case SMP_NO_CONFIG: | |
8aef135c | 1182 | disable_smp(); |
613c25ef TG |
1183 | if (APIC_init_uniprocessor()) |
1184 | pr_notice("Local APIC not detected. Using dummy APIC emulation.\n"); | |
1185 | return; | |
1186 | case SMP_NO_APIC: | |
1187 | disable_smp(); | |
1188 | return; | |
1189 | case SMP_FORCE_UP: | |
1190 | disable_smp(); | |
374aab33 | 1191 | apic_bsp_setup(false); |
250a1ac6 | 1192 | return; |
613c25ef TG |
1193 | case SMP_OK: |
1194 | break; | |
8aef135c GOC |
1195 | } |
1196 | ||
fa47f7e5 SS |
1197 | default_setup_apic_routing(); |
1198 | ||
4c9961d5 | 1199 | if (read_apic_id() != boot_cpu_physical_apicid) { |
8aef135c | 1200 | panic("Boot APIC ID in local APIC unexpected (%d vs %d)", |
4c9961d5 | 1201 | read_apic_id(), boot_cpu_physical_apicid); |
8aef135c GOC |
1202 | /* Or can we switch back to PIC here? */ |
1203 | } | |
1204 | ||
374aab33 | 1205 | cpu0_logical_apicid = apic_bsp_setup(false); |
ef4c59a4 | 1206 | |
c767a54b | 1207 | pr_info("CPU%d: ", 0); |
8aef135c | 1208 | print_cpu_info(&cpu_data(0)); |
c4bd1fda MS |
1209 | |
1210 | if (is_uv_system()) | |
1211 | uv_system_init(); | |
d0af9eed SS |
1212 | |
1213 | set_mtrr_aps_delayed_init(); | |
1a744cb3 LB |
1214 | |
1215 | smp_quirk_init_udelay(); | |
8aef135c | 1216 | } |
d0af9eed SS |
1217 | |
1218 | void arch_enable_nonboot_cpus_begin(void) | |
1219 | { | |
1220 | set_mtrr_aps_delayed_init(); | |
1221 | } | |
1222 | ||
1223 | void arch_enable_nonboot_cpus_end(void) | |
1224 | { | |
1225 | mtrr_aps_init(); | |
1226 | } | |
1227 | ||
a8db8453 GOC |
1228 | /* |
1229 | * Early setup to make printk work. | |
1230 | */ | |
1231 | void __init native_smp_prepare_boot_cpu(void) | |
1232 | { | |
1233 | int me = smp_processor_id(); | |
552be871 | 1234 | switch_to_new_gdt(me); |
c2d1cec1 MT |
1235 | /* already set me in cpu_online_mask in boot_cpu_init() */ |
1236 | cpumask_set_cpu(me, cpu_callout_mask); | |
2a442c9c | 1237 | cpu_set_state_online(me); |
a8db8453 GOC |
1238 | } |
1239 | ||
83f7eb9c GOC |
1240 | void __init native_smp_cpus_done(unsigned int max_cpus) |
1241 | { | |
c767a54b | 1242 | pr_debug("Boot done\n"); |
83f7eb9c | 1243 | |
99e8b9ca | 1244 | nmi_selftest(); |
83f7eb9c | 1245 | impress_friends(); |
83f7eb9c | 1246 | setup_ioapic_dest(); |
d0af9eed | 1247 | mtrr_aps_init(); |
83f7eb9c GOC |
1248 | } |
1249 | ||
3b11ce7f MT |
1250 | static int __initdata setup_possible_cpus = -1; |
1251 | static int __init _setup_possible_cpus(char *str) | |
1252 | { | |
1253 | get_option(&str, &setup_possible_cpus); | |
1254 | return 0; | |
1255 | } | |
1256 | early_param("possible_cpus", _setup_possible_cpus); | |
1257 | ||
1258 | ||
68a1c3f8 | 1259 | /* |
4f062896 | 1260 | * cpu_possible_mask should be static, it cannot change as cpu's |
68a1c3f8 GC |
1261 | * are onlined, or offlined. The reason is per-cpu data-structures |
1262 | * are allocated by some modules at init time, and don't expect to
1263 | * do this dynamically on cpu arrival/departure. | |
4f062896 | 1264 | * cpu_present_mask on the other hand can change dynamically. |
68a1c3f8 GC |
1265 | * When cpu_hotplug is not compiled in, we resort to the current
1266 | * behaviour, which is cpu_possible == cpu_present.
1267 | * - Ashok Raj | |
1268 | * | |
1269 | * Three ways to find out the number of additional hotplug CPUs: | |
1270 | * - If the BIOS specified disabled CPUs in ACPI/mptables use that. | |
3b11ce7f | 1271 | * - The user can override it with possible_cpus=NUM
68a1c3f8 GC |
1272 | * - Otherwise don't reserve additional CPUs. |
1273 | * We do this because additional CPUs waste a lot of memory. | |
1274 | * -AK | |
1275 | */ | |
1276 | __init void prefill_possible_map(void) | |
1277 | { | |
cb48bb59 | 1278 | int i, possible; |
68a1c3f8 | 1279 | |
329513a3 YL |
1280 | /* no processor from mptable or madt */ |
1281 | if (!num_processors) | |
1282 | num_processors = 1; | |
1283 | ||
5f2eb550 JB |
1284 | i = setup_max_cpus ?: 1; |
1285 | if (setup_possible_cpus == -1) { | |
1286 | possible = num_processors; | |
1287 | #ifdef CONFIG_HOTPLUG_CPU | |
1288 | if (setup_max_cpus) | |
1289 | possible += disabled_cpus; | |
1290 | #else | |
1291 | if (possible > i) | |
1292 | possible = i; | |
1293 | #endif | |
1294 | } else | |
3b11ce7f MT |
1295 | possible = setup_possible_cpus; |
1296 | ||
730cf272 MT |
1297 | total_cpus = max_t(int, possible, num_processors + disabled_cpus); |
1298 | ||
2b633e3f YL |
1299 | /* nr_cpu_ids could be reduced via nr_cpus= */ |
1300 | if (possible > nr_cpu_ids) { | |
c767a54b | 1301 | pr_warn("%d Processors exceeds NR_CPUS limit of %d\n", |
2b633e3f YL |
1302 | possible, nr_cpu_ids); |
1303 | possible = nr_cpu_ids; | |
3b11ce7f | 1304 | } |
68a1c3f8 | 1305 | |
5f2eb550 JB |
1306 | #ifdef CONFIG_HOTPLUG_CPU |
1307 | if (!setup_max_cpus) | |
1308 | #endif | |
1309 | if (possible > i) { | |
c767a54b | 1310 | pr_warn("%d Processors exceeds max_cpus limit of %u\n", |
5f2eb550 JB |
1311 | possible, setup_max_cpus); |
1312 | possible = i; | |
1313 | } | |
1314 | ||
c767a54b | 1315 | pr_info("Allowing %d CPUs, %d hotplug CPUs\n", |
68a1c3f8 GC |
1316 | possible, max_t(int, possible - num_processors, 0)); |
1317 | ||
1318 | for (i = 0; i < possible; i++) | |
c2d1cec1 | 1319 | set_cpu_possible(i, true); |
5f2eb550 JB |
1320 | for (; i < NR_CPUS; i++) |
1321 | set_cpu_possible(i, false); | |
3461b0af MT |
1322 | |
1323 | nr_cpu_ids = possible; | |
68a1c3f8 | 1324 | } |
69c18c15 | 1325 | |
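Stripped of the cpumask updates, prefill_possible_map() is a clamp chain: start from the firmware CPU count, optionally add headroom for hotplug, then bound the result by nr_cpus= and maxcpus=. As a pure function (a sketch assuming CONFIG_HOTPLUG_CPU=y; the names are mine, not kernel API):

```c
#include <stdio.h>

static int compute_possible(int num_processors, int disabled_cpus,
			    int setup_possible_cpus,	/* -1 if unset */
			    unsigned int setup_max_cpus, int nr_cpu_ids)
{
	int possible;

	if (setup_possible_cpus == -1) {
		possible = num_processors;
		if (setup_max_cpus)		/* leave room for hotplug CPUs */
			possible += disabled_cpus;
	} else {
		possible = setup_possible_cpus;
	}

	if (possible > nr_cpu_ids)		/* clamp to nr_cpus= / NR_CPUS */
		possible = nr_cpu_ids;
	if (!setup_max_cpus && possible > 1)	/* maxcpus=0: boot CPU only */
		possible = 1;
	return possible;
}

int main(void)
{
	/* 4 enabled + 4 firmware-disabled CPUs, NR_CPUS=8: all 8 possible. */
	printf("%d\n", compute_possible(4, 4, -1, 512, 8));
	return 0;
}
```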
14adf855 CE |
1326 | #ifdef CONFIG_HOTPLUG_CPU |
1327 | ||
1328 | static void remove_siblinginfo(int cpu) | |
1329 | { | |
1330 | int sibling; | |
1331 | struct cpuinfo_x86 *c = &cpu_data(cpu); | |
1332 | ||
7d79a7bd BG |
1333 | for_each_cpu(sibling, topology_core_cpumask(cpu)) { |
1334 | cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); | |
14adf855 CE |
1335 | /*
1336 | * last thread sibling in this cpu core going down | |
1337 | */ | |
7d79a7bd | 1338 | if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1) |
14adf855 CE |
1339 | cpu_data(sibling).booted_cores--; |
1340 | } | |
1341 | ||
7d79a7bd BG |
1342 | for_each_cpu(sibling, topology_sibling_cpumask(cpu)) |
1343 | cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); | |
03bd4e1f WL |
1344 | for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) |
1345 | cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling)); | |
1346 | cpumask_clear(cpu_llc_shared_mask(cpu)); | |
7d79a7bd BG |
1347 | cpumask_clear(topology_sibling_cpumask(cpu)); |
1348 | cpumask_clear(topology_core_cpumask(cpu)); | |
14adf855 CE |
1349 | c->phys_proc_id = 0; |
1350 | c->cpu_core_id = 0; | |
c2d1cec1 | 1351 | cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); |
14adf855 CE |
1352 | } |
1353 | ||
4daa832d | 1354 | static void remove_cpu_from_maps(int cpu) |
69c18c15 | 1355 | { |
c2d1cec1 MT |
1356 | set_cpu_online(cpu, false); |
1357 | cpumask_clear_cpu(cpu, cpu_callout_mask); | |
1358 | cpumask_clear_cpu(cpu, cpu_callin_mask); | |
69c18c15 | 1359 | /* was set by cpu_init() */ |
c2d1cec1 | 1360 | cpumask_clear_cpu(cpu, cpu_initialized_mask); |
23ca4bba | 1361 | numa_remove_cpu(cpu); |
69c18c15 GC |
1362 | } |
1363 | ||
8227dce7 | 1364 | void cpu_disable_common(void) |
69c18c15 GC |
1365 | { |
1366 | int cpu = smp_processor_id(); | |
69c18c15 | 1367 | |
69c18c15 GC |
1368 | remove_siblinginfo(cpu); |
1369 | ||
1370 | /* It's now safe to remove this processor from the online map */ | |
d388e5fd | 1371 | lock_vector_lock(); |
69c18c15 | 1372 | remove_cpu_from_maps(cpu); |
d388e5fd | 1373 | unlock_vector_lock(); |
d7b381bb | 1374 | fixup_irqs(); |
8227dce7 AN |
1375 | } |
1376 | ||
1377 | int native_cpu_disable(void) | |
1378 | { | |
da6139e4 PB |
1379 | int ret; |
1380 | ||
1381 | ret = check_irq_vectors_for_cpu_disable(); | |
1382 | if (ret) | |
1383 | return ret; | |
1384 | ||
8227dce7 | 1385 | clear_local_APIC(); |
8227dce7 | 1386 | cpu_disable_common(); |
2ed53c0d | 1387 | |
69c18c15 GC |
1388 | return 0; |
1389 | } | |
1390 | ||
2a442c9c | 1391 | int common_cpu_die(unsigned int cpu) |
54279552 | 1392 | { |
2a442c9c | 1393 | int ret = 0; |
54279552 | 1394 | |
69c18c15 | 1395 | /* We don't do anything here: idle task is faking death itself. */ |
54279552 | 1396 | |
2ed53c0d | 1397 | /* They ack this in play_dead() by setting CPU_DEAD */ |
2a442c9c | 1398 | if (cpu_wait_death(cpu, 5)) { |
2ed53c0d LT |
1399 | if (system_state == SYSTEM_RUNNING) |
1400 | pr_info("CPU %u is now offline\n", cpu); | |
1401 | } else { | |
1402 | pr_err("CPU %u didn't die...\n", cpu); | |
2a442c9c | 1403 | ret = -1; |
69c18c15 | 1404 | } |
2a442c9c PM |
1405 | |
1406 | return ret; | |
1407 | } | |
1408 | ||
1409 | void native_cpu_die(unsigned int cpu) | |
1410 | { | |
1411 | common_cpu_die(cpu); | |
69c18c15 | 1412 | } |
a21f5d88 AN |
1413 | |
1414 | void play_dead_common(void) | |
1415 | { | |
1416 | idle_task_exit(); | |
1417 | reset_lazy_tlbstate(); | |
02c68a02 | 1418 | amd_e400_remove_cpu(raw_smp_processor_id()); |
a21f5d88 | 1419 | |
a21f5d88 | 1420 | /* Ack it */ |
2a442c9c | 1421 | (void)cpu_report_death(); |
a21f5d88 AN |
1422 | |
1423 | /* | |
1424 | * With physical CPU hotplug, we should halt the cpu | |
1425 | */ | |
1426 | local_irq_disable(); | |
1427 | } | |
1428 | ||
e1c467e6 FY |
1429 | static bool wakeup_cpu0(void) |
1430 | { | |
1431 | if (smp_processor_id() == 0 && enable_start_cpu0) | |
1432 | return true; | |
1433 | ||
1434 | return false; | |
1435 | } | |
1436 | ||
ea530692 PA |
1437 | /* |
1438 | * We need to flush the caches before going to sleep, lest we have | |
1439 | * dirty data in our caches when we come back up. | |
1440 | */ | |
1441 | static inline void mwait_play_dead(void) | |
1442 | { | |
1443 | unsigned int eax, ebx, ecx, edx; | |
1444 | unsigned int highest_cstate = 0; | |
1445 | unsigned int highest_subcstate = 0; | |
ce5f6824 | 1446 | void *mwait_ptr; |
576cfb40 | 1447 | int i; |
ea530692 | 1448 | |
69fb3676 | 1449 | if (!this_cpu_has(X86_FEATURE_MWAIT)) |
ea530692 | 1450 | return; |
840d2830 | 1451 | if (!this_cpu_has(X86_FEATURE_CLFLUSH)) |
ce5f6824 | 1452 | return; |
7b543a53 | 1453 | if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF) |
ea530692 PA |
1454 | return; |
1455 | ||
1456 | eax = CPUID_MWAIT_LEAF; | |
1457 | ecx = 0; | |
1458 | native_cpuid(&eax, &ebx, &ecx, &edx); | |
1459 | ||
1460 | /* | |
1461 | * eax will be 0 if EDX enumeration is not valid. | |
1462 | * Initialized below to cstate, sub_cstate value when EDX is valid. | |
1463 | */ | |
1464 | if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) { | |
1465 | eax = 0; | |
1466 | } else { | |
1467 | edx >>= MWAIT_SUBSTATE_SIZE; | |
1468 | for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { | |
1469 | if (edx & MWAIT_SUBSTATE_MASK) { | |
1470 | highest_cstate = i; | |
1471 | highest_subcstate = edx & MWAIT_SUBSTATE_MASK; | |
1472 | } | |
1473 | } | |
1474 | eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | | |
1475 | (highest_subcstate - 1); | |
1476 | } | |
1477 | ||
ce5f6824 PA |
1478 | /* |
1479 | * This should be a memory location in a cache line which is | |
1480 | * unlikely to be touched by other processors. The actual | |
1481 | * content is immaterial as it is not actually modified in any way. | |
1482 | */ | |
1483 | mwait_ptr = ¤t_thread_info()->flags; | |
1484 | ||
a68e5c94 PA |
1485 | wbinvd(); |
1486 | ||
ea530692 | 1487 | while (1) { |
ce5f6824 PA |
1488 | /* |
1489 | * The CLFLUSH is a workaround for erratum AAI65 for | |
1490 | * the Xeon 7400 series. It's not clear it is actually | |
1491 | * needed, but it should be harmless in either case. | |
1492 | * The WBINVD is insufficient due to the spurious-wakeup | |
1493 | * case where we return around the loop. | |
1494 | */ | |
7d590cca | 1495 | mb(); |
ce5f6824 | 1496 | clflush(mwait_ptr); |
7d590cca | 1497 | mb(); |
ce5f6824 | 1498 | __monitor(mwait_ptr, 0, 0); |
ea530692 PA |
1499 | mb(); |
1500 | __mwait(eax, 0); | |
e1c467e6 FY |
1501 | /* |
1502 | * If NMI wants to wake up CPU0, start CPU0. | |
1503 | */ | |
1504 | if (wakeup_cpu0()) | |
1505 | start_cpu0(); | |
ea530692 PA |
1506 | } |
1507 | } | |
1508 | ||
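The eax value handed to __mwait() packs the target C-state in bits 7:4 and the sub-state below it; the loop above walks CPUID leaf 5's EDX four bits at a time to find the deepest advertised state. The same decoding on a canned EDX value (a userspace sketch; constants inlined from asm/mwait.h):

```c
#include <stdio.h>

#define MWAIT_SUBSTATE_SIZE 4
#define MWAIT_SUBSTATE_MASK 0xf

int main(void)
{
	/* Hypothetical CPUID.5:EDX: 2 C0, 2 C1 and 4 C2 sub-states. */
	unsigned int edx = 0x00000422;
	unsigned int highest_cstate = 0, highest_subcstate = 0;

	edx >>= MWAIT_SUBSTATE_SIZE;	/* skip the C0 field, as above */
	for (int i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}

	/* Pack the hint exactly as mwait_play_dead() does. */
	unsigned int eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
			   (highest_subcstate - 1);
	printf("deepest C%u with %u sub-states -> MWAIT hint eax=%#x\n",
	       highest_cstate + 1, highest_subcstate, eax);
	return 0;	/* prints eax=0x13 for this EDX */
}
```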
1509 | static inline void hlt_play_dead(void) | |
1510 | { | |
7b543a53 | 1511 | if (__this_cpu_read(cpu_info.x86) >= 4) |
a68e5c94 PA |
1512 | wbinvd(); |
1513 | ||
ea530692 | 1514 | while (1) { |
ea530692 | 1515 | native_halt(); |
e1c467e6 FY |
1516 | /* |
1517 | * If NMI wants to wake up CPU0, start CPU0. | |
1518 | */ | |
1519 | if (wakeup_cpu0()) | |
1520 | start_cpu0(); | |
ea530692 PA |
1521 | } |
1522 | } | |
1523 | ||
a21f5d88 AN |
1524 | void native_play_dead(void) |
1525 | { | |
1526 | play_dead_common(); | |
86886e55 | 1527 | tboot_shutdown(TB_SHUTDOWN_WFS); |
ea530692 PA |
1528 | |
1529 | mwait_play_dead(); /* Only returns on failure */ | |
1a022e3f BO |
1530 | if (cpuidle_play_dead()) |
1531 | hlt_play_dead(); | |
a21f5d88 AN |
1532 | } |
1533 | ||
69c18c15 | 1534 | #else /* ... !CONFIG_HOTPLUG_CPU */ |
93be71b6 | 1535 | int native_cpu_disable(void) |
69c18c15 GC |
1536 | { |
1537 | return -ENOSYS; | |
1538 | } | |
1539 | ||
93be71b6 | 1540 | void native_cpu_die(unsigned int cpu) |
69c18c15 GC |
1541 | { |
1542 | /* We said "no" in __cpu_disable */ | |
1543 | BUG(); | |
1544 | } | |
a21f5d88 AN |
1545 | |
1546 | void native_play_dead(void) | |
1547 | { | |
1548 | BUG(); | |
1549 | } | |
1550 | ||
68a1c3f8 | 1551 | #endif |