| 1 | /* |
| 2 | * x86 SMP booting functions |
| 3 | * |
| 4 | * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk> |
| 5 | * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com> |
| 6 | * Copyright 2001 Andi Kleen, SuSE Labs. |
| 7 | * |
| 8 | * Much of the core SMP work is based on previous work by Thomas Radke, to |
| 9 | * whom a great many thanks are extended. |
| 10 | * |
| 11 | * Thanks to Intel for making available several different Pentium, |
| 12 | * Pentium Pro and Pentium-II/Xeon MP machines. |
| 13 | * Original development of Linux SMP code supported by Caldera. |
| 14 | * |
| 15 | * This code is released under the GNU General Public License version 2 or |
| 16 | * later. |
| 17 | * |
| 18 | * Fixes |
| 19 | * Felix Koop : NR_CPUS used properly |
| 20 | * Jose Renau : Handle single CPU case. |
| 21 | * Alan Cox : By repeated request 8) - Total BogoMIPS report. |
| 22 | * Greg Wright : Fix for kernel stacks panic. |
| 23 | * Erich Boleyn : MP v1.4 and additional changes. |
| 24 | * Matthias Sattler : Changes for 2.1 kernel map. |
| 25 | * Michel Lespinasse : Changes for 2.1 kernel map. |
| 26 | * Michael Chastain : Change trampoline.S to gnu as. |
| 27 | * Alan Cox : Dumb bug: 'B' step PPro's are fine |
| 28 | * Ingo Molnar : Added APIC timers, based on code |
| 29 | * from Jose Renau |
| 30 | * Ingo Molnar : various cleanups and rewrites |
| 31 | * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug. |
| 32 | * Maciej W. Rozycki : Bits for genuine 82489DX APICs |
| 33 | * Andi Kleen : Changed for SMP boot into long mode. |
| 34 | * Martin J. Bligh : Added support for multi-quad systems |
| 35 | * Dave Jones : Report invalid combinations of Athlon CPUs. |
| 36 | * Rusty Russell : Hacked into shape for new "hotplug" boot process. |
| 37 | * Andi Kleen : Converted to new state machine. |
| 38 | * Ashok Raj : CPU hotplug support |
| 39 | * Glauber Costa : i386 and x86_64 integration |
| 40 | */ |
| 41 | |
| 42 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 43 | |
| 44 | #include <linux/init.h> |
| 45 | #include <linux/smp.h> |
| 46 | #include <linux/module.h> |
| 47 | #include <linux/sched.h> |
| 48 | #include <linux/percpu.h> |
| 49 | #include <linux/bootmem.h> |
| 50 | #include <linux/err.h> |
| 51 | #include <linux/nmi.h> |
| 52 | #include <linux/tboot.h> |
| 53 | #include <linux/stackprotector.h> |
| 54 | #include <linux/gfp.h> |
| 55 | #include <linux/cpuidle.h> |
| 56 | |
| 57 | #include <asm/acpi.h> |
| 58 | #include <asm/desc.h> |
| 59 | #include <asm/nmi.h> |
| 60 | #include <asm/irq.h> |
| 61 | #include <asm/idle.h> |
| 62 | #include <asm/realmode.h> |
| 63 | #include <asm/cpu.h> |
| 64 | #include <asm/numa.h> |
| 65 | #include <asm/pgtable.h> |
| 66 | #include <asm/tlbflush.h> |
| 67 | #include <asm/mtrr.h> |
| 68 | #include <asm/mwait.h> |
| 69 | #include <asm/apic.h> |
| 70 | #include <asm/io_apic.h> |
| 71 | #include <asm/i387.h> |
| 72 | #include <asm/fpu-internal.h> |
| 73 | #include <asm/setup.h> |
| 74 | #include <asm/uv/uv.h> |
| 75 | #include <linux/mc146818rtc.h> |
| 76 | #include <asm/i8259.h> |
| 78 | #include <asm/misc.h> |
| 79 | |
| 80 | /* State of each CPU */ |
| 81 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; |
| 82 | |
| 83 | /* Number of siblings per CPU package */ |
| 84 | int smp_num_siblings = 1; |
| 85 | EXPORT_SYMBOL(smp_num_siblings); |
| 86 | |
| 87 | /* Last level cache ID of each logical CPU */ |
| 88 | DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID; |
| 89 | |
| 90 | /* representing HT siblings of each logical CPU */ |
| 91 | DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map); |
| 92 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); |
| 93 | |
| 94 | /* representing HT and core siblings of each logical CPU */ |
| 95 | DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map); |
| 96 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); |
| 97 | |
| 98 | DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); |
| 99 | |
| 100 | /* Per CPU bogomips and other parameters */ |
| 101 | DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); |
| 102 | EXPORT_PER_CPU_SYMBOL(cpu_info); |
| 103 | |
| 104 | atomic_t init_deasserted; |
| 105 | |
| 106 | static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) |
| 107 | { |
| 108 | unsigned long flags; |
| 109 | |
| 110 | spin_lock_irqsave(&rtc_lock, flags); |
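|  | /* |
|  |  * CMOS register 0x0f is the shutdown status byte; writing 0x0a |
|  |  * asks the BIOS to resume from a warm reset by jumping through |
|  |  * the vector stored at 40:67 instead of running POST. |
|  |  */ |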
| 111 | CMOS_WRITE(0xa, 0xf); |
| 112 | spin_unlock_irqrestore(&rtc_lock, flags); |
| 113 | local_flush_tlb(); |
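|  | /* |
|  |  * The two writes below install the real-mode entry point in the |
|  |  * BIOS warm-reset vector at 40:67: TRAMPOLINE_PHYS_HIGH (0x469) |
|  |  * takes the segment (start_eip >> 4), TRAMPOLINE_PHYS_LOW (0x467) |
|  |  * the offset (start_eip & 0xf). |
|  |  */ |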
| 114 | pr_debug("1.\n"); |
| 115 | *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = |
| 116 | start_eip >> 4; |
| 117 | pr_debug("2.\n"); |
| 118 | *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = |
| 119 | start_eip & 0xf; |
| 120 | pr_debug("3.\n"); |
| 121 | } |
| 122 | |
| 123 | static inline void smpboot_restore_warm_reset_vector(void) |
| 124 | { |
| 125 | unsigned long flags; |
| 126 | |
| 127 | /* |
| 128 | * Install writable page 0 entry to set BIOS data area. |
| 129 | */ |
| 130 | local_flush_tlb(); |
| 131 | |
| 132 | /* |
| 133 | * Paranoid: Set warm reset code and vector here back |
| 134 | * to default values. |
| 135 | */ |
| 136 | spin_lock_irqsave(&rtc_lock, flags); |
| 137 | CMOS_WRITE(0, 0xf); |
| 138 | spin_unlock_irqrestore(&rtc_lock, flags); |
| 139 | |
| 140 | *((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0; |
| 141 | } |
| 142 | |
| 143 | /* |
| 144 | * Report back to the Boot Processor during boot time or to the caller processor |
| 145 | * during CPU online. |
| 146 | */ |
| 147 | static void smp_callin(void) |
| 148 | { |
| 149 | int cpuid, phys_id; |
| 150 | |
| 151 | /* |
| 152 |  * If woken up by an INIT in an 82489DX configuration |
| 153 | * we may get here before an INIT-deassert IPI reaches |
| 154 | * our local APIC. We have to wait for the IPI or we'll |
| 155 | * lock up on an APIC access. |
| 156 | * |
| 157 |  * Since CPU0 is not woken up by INIT, it doesn't wait for the IPI. |
| 158 | */ |
| 159 | cpuid = smp_processor_id(); |
| 160 | if (apic->wait_for_init_deassert && cpuid) |
| 161 | while (!atomic_read(&init_deasserted)) |
| 162 | cpu_relax(); |
| 163 | |
| 164 | /* |
| 165 | * (This works even if the APIC is not enabled.) |
| 166 | */ |
| 167 | phys_id = read_apic_id(); |
| 168 | |
| 169 | /* |
| 170 | * the boot CPU has finished the init stage and is spinning |
| 171 | * on callin_map until we finish. We are free to set up this |
| 172 | * CPU, first the APIC. (this is probably redundant on most |
| 173 | * boards) |
| 174 | */ |
| 175 | apic_ap_setup(); |
| 176 | |
| 177 | /* |
| 178 | * Need to setup vector mappings before we enable interrupts. |
| 179 | */ |
| 180 | setup_vector_irq(smp_processor_id()); |
| 181 | |
| 182 | /* |
| 183 | * Save our processor parameters. Note: this information |
| 184 | * is needed for clock calibration. |
| 185 | */ |
| 186 | smp_store_cpu_info(cpuid); |
| 187 | |
| 188 | /* |
| 189 | * Get our bogomips. |
| 190 | * Update loops_per_jiffy in cpu_data. Previous call to |
| 191 | * smp_store_cpu_info() stored a value that is close but not as |
| 192 | * accurate as the value just calculated. |
| 193 | */ |
| 194 | calibrate_delay(); |
| 195 | cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy; |
| 196 | pr_debug("Stack at about %p\n", &cpuid); |
| 197 | |
| 198 | /* |
| 199 | * This must be done before setting cpu_online_mask |
| 200 | * or calling notify_cpu_starting. |
| 201 | */ |
| 202 | set_cpu_sibling_map(raw_smp_processor_id()); |
| 203 | wmb(); |
| 204 | |
| 205 | notify_cpu_starting(cpuid); |
| 206 | |
| 207 | /* |
| 208 | * Allow the master to continue. |
| 209 | */ |
| 210 | cpumask_set_cpu(cpuid, cpu_callin_mask); |
| 211 | } |
| 212 | |
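|  | /* |
|  |  * Logical APIC ID of CPU0 and the flag that arms the NMI path used |
|  |  * to wake a soft-offlined CPU0 back up. |
|  |  */ |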
| 213 | static int cpu0_logical_apicid; |
| 214 | static int enable_start_cpu0; |
| 215 | /* |
| 216 | * Activate a secondary processor. |
| 217 | */ |
| 218 | static void notrace start_secondary(void *unused) |
| 219 | { |
| 220 | /* |
| 221 |  * Don't put *anything* before cpu_init(); SMP booting is so |
| 222 |  * fragile that we want to limit the things done here to the |
| 223 |  * bare minimum. |
| 224 | */ |
| 225 | cpu_init(); |
| 226 | x86_cpuinit.early_percpu_clock_init(); |
| 227 | preempt_disable(); |
| 228 | smp_callin(); |
| 229 | |
| 230 | enable_start_cpu0 = 0; |
| 231 | |
| 232 | #ifdef CONFIG_X86_32 |
| 233 | /* switch away from the initial page table */ |
| 234 | load_cr3(swapper_pg_dir); |
| 235 | __flush_tlb_all(); |
| 236 | #endif |
| 237 | |
| 238 | /* otherwise gcc will move up smp_processor_id before the cpu_init */ |
| 239 | barrier(); |
| 240 | /* |
| 241 | * Check TSC synchronization with the BP: |
| 242 | */ |
| 243 | check_tsc_sync_target(); |
| 244 | |
| 245 | /* |
| 246 | * Enable the espfix hack for this CPU |
| 247 | */ |
| 248 | #ifdef CONFIG_X86_ESPFIX64 |
| 249 | init_espfix_ap(); |
| 250 | #endif |
| 251 | |
| 252 | /* |
| 253 |  * We need to hold vector_lock so that the set of online cpus |
| 254 |  * does not change while we are assigning vectors to cpus. Holding |
| 255 |  * this lock ensures we don't half-assign or half-remove an irq from a cpu. |
| 256 | */ |
| 257 | lock_vector_lock(); |
| 258 | set_cpu_online(smp_processor_id(), true); |
| 259 | unlock_vector_lock(); |
| 260 | per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; |
| 261 | x86_platform.nmi_init(); |
| 262 | |
| 263 | /* enable local interrupts */ |
| 264 | local_irq_enable(); |
| 265 | |
| 266 | /* to prevent fake stack check failure in clock setup */ |
| 267 | boot_init_stack_canary(); |
| 268 | |
| 269 | x86_cpuinit.setup_percpu_clockev(); |
| 270 | |
| 271 | wmb(); |
| 272 | cpu_startup_entry(CPUHP_ONLINE); |
| 273 | } |
| 274 | |
| 275 | void __init smp_store_boot_cpu_info(void) |
| 276 | { |
| 277 | int id = 0; /* CPU 0 */ |
| 278 | struct cpuinfo_x86 *c = &cpu_data(id); |
| 279 | |
| 280 | *c = boot_cpu_data; |
| 281 | c->cpu_index = id; |
| 282 | } |
| 283 | |
| 284 | /* |
| 285 | * The bootstrap kernel entry code has set these up. Save them for |
| 286 | * a given CPU |
| 287 | */ |
| 288 | void smp_store_cpu_info(int id) |
| 289 | { |
| 290 | struct cpuinfo_x86 *c = &cpu_data(id); |
| 291 | |
| 292 | *c = boot_cpu_data; |
| 293 | c->cpu_index = id; |
| 294 | /* |
| 295 |  * At boot time, CPU0 already has this set up. Save the info when |
| 296 |  * bringing up an AP or a previously offlined CPU0. |
| 297 | */ |
| 298 | identify_secondary_cpu(c); |
| 299 | } |
| 300 | |
| 301 | static bool |
| 302 | topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
| 303 | { |
| 304 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
| 305 | |
| 306 | return (cpu_to_node(cpu1) == cpu_to_node(cpu2)); |
| 307 | } |
| 308 | |
| 309 | static bool |
| 310 | topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name) |
| 311 | { |
| 312 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
| 313 | |
| 314 | return !WARN_ONCE(!topology_same_node(c, o), |
| 315 | "sched: CPU #%d's %s-sibling CPU #%d is not on the same node! " |
| 316 | "[node: %d != %d]. Ignoring dependency.\n", |
| 317 | cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2)); |
| 318 | } |
| 319 | |
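|  | /* |
|  |  * Link two CPUs symmetrically: set each one in the other's |
|  |  * cpu_<mask>_mask so the sibling relation always goes both ways. |
|  |  */ |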
| 320 | #define link_mask(_m, c1, c2) \ |
| 321 | do { \ |
| 322 | cpumask_set_cpu((c1), cpu_##_m##_mask(c2)); \ |
| 323 | cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \ |
| 324 | } while (0) |
| 325 | |
| 326 | static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
| 327 | { |
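|  | /* |
|  |  * With AMD topology extensions (TOPOEXT), threads are SMT siblings |
|  |  * only if they share the package, the last-level cache and the |
|  |  * compute unit; otherwise matching package and core IDs suffices. |
|  |  */ |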
| 328 | if (cpu_has_topoext) { |
| 329 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
| 330 | |
| 331 | if (c->phys_proc_id == o->phys_proc_id && |
| 332 | per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) && |
| 333 | c->compute_unit_id == o->compute_unit_id) |
| 334 | return topology_sane(c, o, "smt"); |
| 335 | |
| 336 | } else if (c->phys_proc_id == o->phys_proc_id && |
| 337 | c->cpu_core_id == o->cpu_core_id) { |
| 338 | return topology_sane(c, o, "smt"); |
| 339 | } |
| 340 | |
| 341 | return false; |
| 342 | } |
| 343 | |
| 344 | static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
| 345 | { |
| 346 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
| 347 | |
| 348 | if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID && |
| 349 | per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) |
| 350 | return topology_sane(c, o, "llc"); |
| 351 | |
| 352 | return false; |
| 353 | } |
| 354 | |
| 355 | /* |
| 356 | * Unlike the other levels, we do not enforce keeping a |
| 357 | * multicore group inside a NUMA node. If this happens, we will |
| 358 | * discard the MC level of the topology later. |
| 359 | */ |
| 360 | static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
| 361 | { |
| 362 | if (c->phys_proc_id == o->phys_proc_id) |
| 363 | return true; |
| 364 | return false; |
| 365 | } |
| 366 | |
| 367 | static struct sched_domain_topology_level numa_inside_package_topology[] = { |
| 368 | #ifdef CONFIG_SCHED_SMT |
| 369 | { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, |
| 370 | #endif |
| 371 | #ifdef CONFIG_SCHED_MC |
| 372 | { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, |
| 373 | #endif |
| 374 | { NULL, }, |
| 375 | }; |
| 376 | /* |
| 377 | * set_sched_topology() sets the topology internal to a CPU. The |
| 378 | * NUMA topologies are layered on top of it to build the full |
| 379 | * system topology. |
| 380 | * |
| 381 | * If NUMA nodes are observed to occur within a CPU package, this |
| 382 | * function should be called. It forces the sched domain code to |
| 383 | * only use the SMT level for the CPU portion of the topology. |
| 384 | * This essentially falls back to relying on NUMA information |
| 385 | * from the SRAT table to describe the entire system topology |
| 386 | * (except for hyperthreads). |
| 387 | */ |
| 388 | static void primarily_use_numa_for_topology(void) |
| 389 | { |
| 390 | set_sched_topology(numa_inside_package_topology); |
| 391 | } |
| 392 | |
| 393 | void set_cpu_sibling_map(int cpu) |
| 394 | { |
| 395 | bool has_smt = smp_num_siblings > 1; |
| 396 | bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; |
| 397 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
| 398 | struct cpuinfo_x86 *o; |
| 399 | int i; |
| 400 | |
| 401 | cpumask_set_cpu(cpu, cpu_sibling_setup_mask); |
| 402 | |
| 403 | if (!has_mp) { |
| 404 | cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); |
| 405 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); |
| 406 | cpumask_set_cpu(cpu, cpu_core_mask(cpu)); |
| 407 | c->booted_cores = 1; |
| 408 | return; |
| 409 | } |
| 410 | |
| 411 | for_each_cpu(i, cpu_sibling_setup_mask) { |
| 412 | o = &cpu_data(i); |
| 413 | |
| 414 | if ((i == cpu) || (has_smt && match_smt(c, o))) |
| 415 | link_mask(sibling, cpu, i); |
| 416 | |
| 417 | if ((i == cpu) || (has_mp && match_llc(c, o))) |
| 418 | link_mask(llc_shared, cpu, i); |
| 419 | |
| 420 | } |
| 421 | |
| 422 | /* |
| 423 | * This needs a separate iteration over the cpus because we rely on all |
| 424 | * cpu_sibling_mask links to be set-up. |
| 425 | */ |
| 426 | for_each_cpu(i, cpu_sibling_setup_mask) { |
| 427 | o = &cpu_data(i); |
| 428 | |
| 429 | if ((i == cpu) || (has_mp && match_die(c, o))) { |
| 430 | link_mask(core, cpu, i); |
| 431 | |
| 432 | /* |
| 433 |  * Does this new cpu bring up a new core? |
| 434 | */ |
| 435 | if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) { |
| 436 | /* |
| 437 | * for each core in package, increment |
| 438 | * the booted_cores for this new cpu |
| 439 | */ |
| 440 | if (cpumask_first(cpu_sibling_mask(i)) == i) |
| 441 | c->booted_cores++; |
| 442 | /* |
| 443 | * increment the core count for all |
| 444 | * the other cpus in this package |
| 445 | */ |
| 446 | if (i != cpu) |
| 447 | cpu_data(i).booted_cores++; |
| 448 | } else if (i != cpu && !c->booted_cores) |
| 449 | c->booted_cores = cpu_data(i).booted_cores; |
| 450 | } |
| 451 | if (match_die(c, o) && !topology_same_node(c, o)) |
| 452 | primarily_use_numa_for_topology(); |
| 453 | } |
| 454 | } |
| 455 | |
| 456 | /* maps the cpu to the sched domain representing multi-core */ |
| 457 | const struct cpumask *cpu_coregroup_mask(int cpu) |
| 458 | { |
| 459 | return cpu_llc_shared_mask(cpu); |
| 460 | } |
| 461 | |
| 462 | static void impress_friends(void) |
| 463 | { |
| 464 | int cpu; |
| 465 | unsigned long bogosum = 0; |
| 466 | /* |
| 467 | * Allow the user to impress friends. |
| 468 | */ |
| 469 | pr_debug("Before bogomips\n"); |
| 470 | for_each_possible_cpu(cpu) |
| 471 | if (cpumask_test_cpu(cpu, cpu_callout_mask)) |
| 472 | bogosum += cpu_data(cpu).loops_per_jiffy; |
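|  | /* |
|  |  * BogoMIPS = loops_per_jiffy * HZ / 500000; the second expression |
|  |  * below recovers the two fractional digits. |
|  |  */ |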
| 473 | pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n", |
| 474 | num_online_cpus(), |
| 475 | bogosum/(500000/HZ), |
| 476 | (bogosum/(5000/HZ))%100); |
| 477 | |
| 478 | pr_debug("Before bogocount - setting activated=1\n"); |
| 479 | } |
| 480 | |
| 481 | void __inquire_remote_apic(int apicid) |
| 482 | { |
| 483 | unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; |
| 484 | const char * const names[] = { "ID", "VERSION", "SPIV" }; |
| 485 | int timeout; |
| 486 | u32 status; |
| 487 | |
| 488 | pr_info("Inquiring remote APIC 0x%x...\n", apicid); |
| 489 | |
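|  | /* |
|  |  * A remote-read IPI carries the target register's offset divided by |
|  |  * 16 in the ICR vector field, hence the ">> 4" in regs[] above. |
|  |  */ |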
| 490 | for (i = 0; i < ARRAY_SIZE(regs); i++) { |
| 491 | pr_info("... APIC 0x%x %s: ", apicid, names[i]); |
| 492 | |
| 493 | /* |
| 494 | * Wait for idle. |
| 495 | */ |
| 496 | status = safe_apic_wait_icr_idle(); |
| 497 | if (status) |
| 498 | pr_cont("a previous APIC delivery may have failed\n"); |
| 499 | |
| 500 | apic_icr_write(APIC_DM_REMRD | regs[i], apicid); |
| 501 | |
| 502 | timeout = 0; |
| 503 | do { |
| 504 | udelay(100); |
| 505 | status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK; |
| 506 | } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000); |
| 507 | |
| 508 | switch (status) { |
| 509 | case APIC_ICR_RR_VALID: |
| 510 | status = apic_read(APIC_RRR); |
| 511 | pr_cont("%08x\n", status); |
| 512 | break; |
| 513 | default: |
| 514 | pr_cont("failed\n"); |
| 515 | } |
| 516 | } |
| 517 | } |
| 518 | |
| 519 | /* |
| 520 | * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal |
| 521 | * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this |
| 522 | * won't ... remember to clear down the APIC, etc later. |
| 523 | */ |
| 524 | int |
| 525 | wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip) |
| 526 | { |
| 527 | unsigned long send_status, accept_status = 0; |
| 528 | int maxlvt; |
| 529 | |
| 530 | /* Target chip */ |
| 531 | /* Boot on the stack */ |
| 532 | /* Kick the second */ |
| 533 | apic_icr_write(APIC_DM_NMI | apic->dest_logical, apicid); |
| 534 | |
| 535 | pr_debug("Waiting for send to finish...\n"); |
| 536 | send_status = safe_apic_wait_icr_idle(); |
| 537 | |
| 538 | /* |
| 539 | * Give the other CPU some time to accept the IPI. |
| 540 | */ |
| 541 | udelay(200); |
| 542 | if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { |
| 543 | maxlvt = lapic_get_maxlvt(); |
| 544 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
| 545 | apic_write(APIC_ESR, 0); |
| 546 | accept_status = (apic_read(APIC_ESR) & 0xEF); |
| 547 | } |
| 548 | pr_debug("NMI sent\n"); |
| 549 | |
| 550 | if (send_status) |
| 551 | pr_err("APIC never delivered???\n"); |
| 552 | if (accept_status) |
| 553 | pr_err("APIC delivery error (%lx)\n", accept_status); |
| 554 | |
| 555 | return (send_status | accept_status); |
| 556 | } |
| 557 | |
| 558 | static int |
| 559 | wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) |
| 560 | { |
| 561 | unsigned long send_status, accept_status = 0; |
| 562 | int maxlvt, num_starts, j; |
| 563 | |
| 564 | maxlvt = lapic_get_maxlvt(); |
| 565 | |
| 566 | /* |
| 567 | * Be paranoid about clearing APIC errors. |
| 568 | */ |
| 569 | if (APIC_INTEGRATED(apic_version[phys_apicid])) { |
| 570 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
| 571 | apic_write(APIC_ESR, 0); |
| 572 | apic_read(APIC_ESR); |
| 573 | } |
| 574 | |
| 575 | pr_debug("Asserting INIT\n"); |
| 576 | |
| 577 | /* |
| 578 | * Turn INIT on target chip |
| 579 | */ |
| 580 | /* |
| 581 | * Send IPI |
| 582 | */ |
| 583 | apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT, |
| 584 | phys_apicid); |
| 585 | |
| 586 | pr_debug("Waiting for send to finish...\n"); |
| 587 | send_status = safe_apic_wait_icr_idle(); |
| 588 | |
| 589 | mdelay(10); |
| 590 | |
| 591 | pr_debug("Deasserting INIT\n"); |
| 592 | |
| 593 | /* Target chip */ |
| 594 | /* Send IPI */ |
| 595 | apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid); |
| 596 | |
| 597 | pr_debug("Waiting for send to finish...\n"); |
| 598 | send_status = safe_apic_wait_icr_idle(); |
| 599 | |
| 600 | mb(); |
| 601 | atomic_set(&init_deasserted, 1); |
| 602 | |
| 603 | /* |
| 604 |  * Should we send STARTUP IPIs? |
| 605 | * |
| 606 | * Determine this based on the APIC version. |
| 607 | * If we don't have an integrated APIC, don't send the STARTUP IPIs. |
| 608 | */ |
| 609 | if (APIC_INTEGRATED(apic_version[phys_apicid])) |
| 610 | num_starts = 2; |
| 611 | else |
| 612 | num_starts = 0; |
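|  | /* The MP spec's INIT/SIPI/SIPI sequence calls for two STARTUP IPIs. */ |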
| 613 | |
| 614 | /* |
| 615 | * Paravirt / VMI wants a startup IPI hook here to set up the |
| 616 | * target processor state. |
| 617 | */ |
| 618 | startup_ipi_hook(phys_apicid, (unsigned long) start_secondary, |
| 619 | stack_start); |
| 620 | |
| 621 | /* |
| 622 | * Run STARTUP IPI loop. |
| 623 | */ |
| 624 | pr_debug("#startup loops: %d\n", num_starts); |
| 625 | |
| 626 | for (j = 1; j <= num_starts; j++) { |
| 627 | pr_debug("Sending STARTUP #%d\n", j); |
| 628 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
| 629 | apic_write(APIC_ESR, 0); |
| 630 | apic_read(APIC_ESR); |
| 631 | pr_debug("After apic_write\n"); |
| 632 | |
| 633 | /* |
| 634 | * STARTUP IPI |
| 635 | */ |
| 636 | |
| 637 | /* Target chip */ |
| 638 | /* Boot on the stack */ |
| 639 | /* Kick the second */ |
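|  | /* |
|  |  * The STARTUP vector field carries the physical page number of |
|  |  * the real-mode entry point, so start_eip must be 4K-aligned |
|  |  * and below 1MB. |
|  |  */ |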
| 640 | apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12), |
| 641 | phys_apicid); |
| 642 | |
| 643 | /* |
| 644 | * Give the other CPU some time to accept the IPI. |
| 645 | */ |
| 646 | udelay(300); |
| 647 | |
| 648 | pr_debug("Startup point 1\n"); |
| 649 | |
| 650 | pr_debug("Waiting for send to finish...\n"); |
| 651 | send_status = safe_apic_wait_icr_idle(); |
| 652 | |
| 653 | /* |
| 654 | * Give the other CPU some time to accept the IPI. |
| 655 | */ |
| 656 | udelay(200); |
| 657 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
| 658 | apic_write(APIC_ESR, 0); |
| 659 | accept_status = (apic_read(APIC_ESR) & 0xEF); |
| 660 | if (send_status || accept_status) |
| 661 | break; |
| 662 | } |
| 663 | pr_debug("After Startup\n"); |
| 664 | |
| 665 | if (send_status) |
| 666 | pr_err("APIC never delivered???\n"); |
| 667 | if (accept_status) |
| 668 | pr_err("APIC delivery error (%lx)\n", accept_status); |
| 669 | |
| 670 | return (send_status | accept_status); |
| 671 | } |
| 672 | |
| 673 | void smp_announce(void) |
| 674 | { |
| 675 | int num_nodes = num_online_nodes(); |
| 676 | |
| 677 | printk(KERN_INFO "x86: Booted up %d node%s, %d CPUs\n", |
| 678 | num_nodes, (num_nodes > 1 ? "s" : ""), num_online_cpus()); |
| 679 | } |
| 680 | |
| 681 | /* reduce the number of lines printed when booting a large cpu count system */ |
| 682 | static void announce_cpu(int cpu, int apicid) |
| 683 | { |
| 684 | static int current_node = -1; |
| 685 | int node = early_cpu_to_node(cpu); |
| 686 | static int width, node_width; |
| 687 | |
| 688 | if (!width) |
| 689 | width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */ |
| 690 | |
| 691 | if (!node_width) |
| 692 | node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */ |
| 693 | |
| 694 | if (cpu == 1) |
| 695 | printk(KERN_INFO "x86: Booting SMP configuration:\n"); |
| 696 | |
| 697 | if (system_state == SYSTEM_BOOTING) { |
| 698 | if (node != current_node) { |
| 699 | if (current_node > (-1)) |
| 700 | pr_cont("\n"); |
| 701 | current_node = node; |
| 702 | |
| 703 | printk(KERN_INFO ".... node %*s#%d, CPUs: ", |
| 704 | node_width - num_digits(node), " ", node); |
| 705 | } |
| 706 | |
| 707 | /* Add padding for the BSP */ |
| 708 | if (cpu == 1) |
| 709 | pr_cont("%*s", width + 1, " "); |
| 710 | |
| 711 | pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu); |
| 712 | |
| 713 | } else |
| 714 | pr_info("Booting Node %d Processor %d APIC 0x%x\n", |
| 715 | node, cpu, apicid); |
| 716 | } |
| 717 | |
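|  | /* |
|  |  * Swallow the NMI used to kick a soft-offlined CPU0 out of its |
|  |  * play_dead() loop; the actual restart happens in wakeup_cpu0() / |
|  |  * start_cpu0() on the woken CPU. |
|  |  */ |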
| 718 | static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs) |
| 719 | { |
| 720 | int cpu; |
| 721 | |
| 722 | cpu = smp_processor_id(); |
| 723 | if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0) |
| 724 | return NMI_HANDLED; |
| 725 | |
| 726 | return NMI_DONE; |
| 727 | } |
| 728 | |
| 729 | /* |
| 730 | * Wake up AP by INIT, INIT, STARTUP sequence. |
| 731 | * |
| 732 |  * Instead of waiting for STARTUP after INITs, the BSP would execute the |
| 733 |  * BIOS boot-strap code, which is not the desired behavior when waking up |
| 734 |  * the BSP. To avoid the boot-strap code, wake up CPU0 by NMI instead. |
| 735 | * |
| 736 | * This works to wake up soft offlined CPU0 only. If CPU0 is hard offlined |
| 737 | * (i.e. physically hot removed and then hot added), NMI won't wake it up. |
| 738 |  * We'll change this code in the future to wake up hard offlined CPU0 if |
| 739 |  * a real platform and a request for it become available. |
| 740 | */ |
| 741 | static int |
| 742 | wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid, |
| 743 | int *cpu0_nmi_registered) |
| 744 | { |
| 745 | int id; |
| 746 | int boot_error; |
| 747 | |
| 748 | preempt_disable(); |
| 749 | |
| 750 | /* |
| 751 | * Wake up AP by INIT, INIT, STARTUP sequence. |
| 752 | */ |
| 753 | if (cpu) { |
| 754 | boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip); |
| 755 | goto out; |
| 756 | } |
| 757 | |
| 758 | /* |
| 759 |  * Wake up the BSP by NMI. |
| 760 |  * |
| 761 |  * Register an NMI handler to help wake up CPU0. |
| 762 | */ |
| 763 | boot_error = register_nmi_handler(NMI_LOCAL, |
| 764 | wakeup_cpu0_nmi, 0, "wake_cpu0"); |
| 765 | |
| 766 | if (!boot_error) { |
| 767 | enable_start_cpu0 = 1; |
| 768 | *cpu0_nmi_registered = 1; |
| 769 | if (apic->dest_logical == APIC_DEST_LOGICAL) |
| 770 | id = cpu0_logical_apicid; |
| 771 | else |
| 772 | id = apicid; |
| 773 | boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip); |
| 774 | } |
| 775 | |
| 776 | out: |
| 777 | preempt_enable(); |
| 778 | |
| 779 | return boot_error; |
| 780 | } |
| 781 | |
| 782 | /* |
| 783 | * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad |
| 784 | * (ie clustered apic addressing mode), this is a LOGICAL apic ID. |
| 785 | * Returns zero if CPU booted OK, else error code from |
| 786 | * ->wakeup_secondary_cpu. |
| 787 | */ |
| 788 | static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) |
| 789 | { |
| 790 | volatile u32 *trampoline_status = |
| 791 | (volatile u32 *) __va(real_mode_header->trampoline_status); |
| 792 | /* start_ip had better be page-aligned! */ |
| 793 | unsigned long start_ip = real_mode_header->trampoline_start; |
| 794 | |
| 795 | unsigned long boot_error = 0; |
| 796 | int cpu0_nmi_registered = 0; |
| 797 | unsigned long timeout; |
| 798 | |
| 799 | /* Just in case we booted with a single CPU. */ |
| 800 | alternatives_enable_smp(); |
| 801 | |
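|  | /* |
|  |  * Point the idle task's saved stack pointer at the top of its stack |
|  |  * page, just below room for one struct pt_regs. |
|  |  */ |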
| 802 | idle->thread.sp = (unsigned long) (((struct pt_regs *) |
| 803 | (THREAD_SIZE + task_stack_page(idle))) - 1); |
| 804 | per_cpu(current_task, cpu) = idle; |
| 805 | |
| 806 | #ifdef CONFIG_X86_32 |
| 807 | /* Stack for startup_32 can be just as for start_secondary onwards */ |
| 808 | irq_ctx_init(cpu); |
| 809 | per_cpu(cpu_current_top_of_stack, cpu) = |
| 810 | (unsigned long)task_stack_page(idle) + THREAD_SIZE; |
| 811 | #else |
| 812 | clear_tsk_thread_flag(idle, TIF_FORK); |
| 813 | initial_gs = per_cpu_offset(cpu); |
| 814 | #endif |
| 815 | per_cpu(kernel_stack, cpu) = |
| 816 | (unsigned long)task_stack_page(idle) - |
| 817 | KERNEL_STACK_OFFSET + THREAD_SIZE; |
| 818 | early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); |
| 819 | initial_code = (unsigned long)start_secondary; |
| 820 | stack_start = idle->thread.sp; |
| 821 | |
| 822 | /* So we see what's up */ |
| 823 | announce_cpu(cpu, apicid); |
| 824 | |
| 825 | /* |
| 826 | * This grunge runs the startup process for |
| 827 | * the targeted processor. |
| 828 | */ |
| 829 | |
| 830 | atomic_set(&init_deasserted, 0); |
| 831 | |
| 832 | if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { |
| 833 | |
| 834 | pr_debug("Setting warm reset code and vector.\n"); |
| 835 | |
| 836 | smpboot_setup_warm_reset_vector(start_ip); |
| 837 | /* |
| 838 | * Be paranoid about clearing APIC errors. |
| 839 | */ |
| 840 | if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { |
| 841 | apic_write(APIC_ESR, 0); |
| 842 | apic_read(APIC_ESR); |
| 843 | } |
| 844 | } |
| 845 | |
| 846 | /* |
| 847 | * AP might wait on cpu_callout_mask in cpu_init() with |
| 848 | * cpu_initialized_mask set if previous attempt to online |
| 849 |  * it timed out. Clear cpu_initialized_mask so that after |
| 850 | * INIT/SIPI it could start with a clean state. |
| 851 | */ |
| 852 | cpumask_clear_cpu(cpu, cpu_initialized_mask); |
| 853 | smp_mb(); |
| 854 | |
| 855 | /* |
| 856 |  * Wake up a CPU in different ways: |
| 857 | * - Use the method in the APIC driver if it's defined |
| 858 | * Otherwise, |
| 859 | * - Use an INIT boot APIC message for APs or NMI for BSP. |
| 860 | */ |
| 861 | if (apic->wakeup_secondary_cpu) |
| 862 | boot_error = apic->wakeup_secondary_cpu(apicid, start_ip); |
| 863 | else |
| 864 | boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid, |
| 865 | &cpu0_nmi_registered); |
| 866 | |
| 867 | if (!boot_error) { |
| 868 | /* |
| 869 |  * Wait 10s total for a response from the AP |
| 870 | */ |
| 871 | boot_error = -1; |
| 872 | timeout = jiffies + 10*HZ; |
| 873 | while (time_before(jiffies, timeout)) { |
| 874 | if (cpumask_test_cpu(cpu, cpu_initialized_mask)) { |
| 875 | /* |
| 876 | * Tell AP to proceed with initialization |
| 877 | */ |
| 878 | cpumask_set_cpu(cpu, cpu_callout_mask); |
| 879 | boot_error = 0; |
| 880 | break; |
| 881 | } |
| 882 | udelay(100); |
| 883 | schedule(); |
| 884 | } |
| 885 | } |
| 886 | |
| 887 | if (!boot_error) { |
| 888 | /* |
| 889 |  * Wait till the AP completes its initialization |
| 890 | */ |
| 891 | while (!cpumask_test_cpu(cpu, cpu_callin_mask)) { |
| 892 | /* |
| 893 | * Allow other tasks to run while we wait for the |
| 894 | * AP to come online. This also gives a chance |
| 895 |  * for the MTRR work (triggered by the AP coming online) |
| 896 | * to be completed in the stop machine context. |
| 897 | */ |
| 898 | udelay(100); |
| 899 | schedule(); |
| 900 | } |
| 901 | } |
| 902 | |
| 903 | /* mark "stuck" area as not stuck */ |
| 904 | *trampoline_status = 0; |
| 905 | |
| 906 | if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { |
| 907 | /* |
| 908 | * Cleanup possible dangling ends... |
| 909 | */ |
| 910 | smpboot_restore_warm_reset_vector(); |
| 911 | } |
| 912 | /* |
| 913 | * Clean up the nmi handler. Do this after the callin and callout sync |
| 914 | * to avoid impact of possible long unregister time. |
| 915 | */ |
| 916 | if (cpu0_nmi_registered) |
| 917 | unregister_nmi_handler(NMI_LOCAL, "wake_cpu0"); |
| 918 | |
| 919 | return boot_error; |
| 920 | } |
| 921 | |
| 922 | int native_cpu_up(unsigned int cpu, struct task_struct *tidle) |
| 923 | { |
| 924 | int apicid = apic->cpu_present_to_apicid(cpu); |
| 925 | unsigned long flags; |
| 926 | int err; |
| 927 | |
| 928 | WARN_ON(irqs_disabled()); |
| 929 | |
| 930 | pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu); |
| 931 | |
| 932 | if (apicid == BAD_APICID || |
| 933 | !physid_isset(apicid, phys_cpu_present_map) || |
| 934 | !apic->apic_id_valid(apicid)) { |
| 935 | pr_err("%s: bad cpu %d\n", __func__, cpu); |
| 936 | return -EINVAL; |
| 937 | } |
| 938 | |
| 939 | /* |
| 940 | * Already booted CPU? |
| 941 | */ |
| 942 | if (cpumask_test_cpu(cpu, cpu_callin_mask)) { |
| 943 | pr_debug("do_boot_cpu %d Already started\n", cpu); |
| 944 | return -ENOSYS; |
| 945 | } |
| 946 | |
| 947 | /* |
| 948 | * Save current MTRR state in case it was changed since early boot |
| 949 | * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync: |
| 950 | */ |
| 951 | mtrr_save_state(); |
| 952 | |
| 953 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; |
| 954 | |
| 955 | /* the FPU context is blank, nobody can own it */ |
| 956 | __cpu_disable_lazy_restore(cpu); |
| 957 | |
| 958 | err = do_boot_cpu(apicid, cpu, tidle); |
| 959 | if (err) { |
| 960 | pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu); |
| 961 | return -EIO; |
| 962 | } |
| 963 | |
| 964 | /* |
| 965 | * Check TSC synchronization with the AP (keep irqs disabled |
| 966 | * while doing so): |
| 967 | */ |
| 968 | local_irq_save(flags); |
| 969 | check_tsc_sync_source(cpu); |
| 970 | local_irq_restore(flags); |
| 971 | |
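|  | /* Wait until the AP marks itself online in start_secondary(). */ |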
| 972 | while (!cpu_online(cpu)) { |
| 973 | cpu_relax(); |
| 974 | touch_nmi_watchdog(); |
| 975 | } |
| 976 | |
| 977 | return 0; |
| 978 | } |
| 979 | |
| 980 | /** |
| 981 | * arch_disable_smp_support() - disables SMP support for x86 at runtime |
| 982 | */ |
| 983 | void arch_disable_smp_support(void) |
| 984 | { |
| 985 | disable_ioapic_support(); |
| 986 | } |
| 987 | |
| 988 | /* |
| 989 | * Fall back to non SMP mode after errors. |
| 990 | * |
| 991 | * RED-PEN audit/test this more. I bet there is more state messed up here. |
| 992 | */ |
| 993 | static __init void disable_smp(void) |
| 994 | { |
| 995 | pr_info("SMP disabled\n"); |
| 996 | |
| 997 | disable_ioapic_support(); |
| 998 | |
| 999 | init_cpu_present(cpumask_of(0)); |
| 1000 | init_cpu_possible(cpumask_of(0)); |
| 1001 | |
| 1002 | if (smp_found_config) |
| 1003 | physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); |
| 1004 | else |
| 1005 | physid_set_mask_of_physid(0, &phys_cpu_present_map); |
| 1006 | cpumask_set_cpu(0, cpu_sibling_mask(0)); |
| 1007 | cpumask_set_cpu(0, cpu_core_mask(0)); |
| 1008 | } |
| 1009 | |
| 1010 | enum { |
| 1011 | SMP_OK, |
| 1012 | SMP_NO_CONFIG, |
| 1013 | SMP_NO_APIC, |
| 1014 | SMP_FORCE_UP, |
| 1015 | }; |
| 1016 | |
| 1017 | /* |
| 1018 | * Various sanity checks. |
| 1019 | */ |
| 1020 | static int __init smp_sanity_check(unsigned max_cpus) |
| 1021 | { |
| 1022 | preempt_disable(); |
| 1023 | |
| 1024 | #if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32) |
| 1025 | if (def_to_bigsmp && nr_cpu_ids > 8) { |
| 1026 | unsigned int cpu; |
| 1027 | unsigned nr; |
| 1028 | |
| 1029 | pr_warn("More than 8 CPUs detected - skipping them\n" |
| 1030 | "Use CONFIG_X86_BIGSMP\n"); |
| 1031 | |
| 1032 | nr = 0; |
| 1033 | for_each_present_cpu(cpu) { |
| 1034 | if (nr >= 8) |
| 1035 | set_cpu_present(cpu, false); |
| 1036 | nr++; |
| 1037 | } |
| 1038 | |
| 1039 | nr = 0; |
| 1040 | for_each_possible_cpu(cpu) { |
| 1041 | if (nr >= 8) |
| 1042 | set_cpu_possible(cpu, false); |
| 1043 | nr++; |
| 1044 | } |
| 1045 | |
| 1046 | nr_cpu_ids = 8; |
| 1047 | } |
| 1048 | #endif |
| 1049 | |
| 1050 | if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { |
| 1051 | pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n", |
| 1052 | hard_smp_processor_id()); |
| 1053 | |
| 1054 | physid_set(hard_smp_processor_id(), phys_cpu_present_map); |
| 1055 | } |
| 1056 | |
| 1057 | /* |
| 1058 | * If we couldn't find an SMP configuration at boot time, |
| 1059 | * get out of here now! |
| 1060 | */ |
| 1061 | if (!smp_found_config && !acpi_lapic) { |
| 1062 | preempt_enable(); |
| 1063 | pr_notice("SMP motherboard not detected\n"); |
| 1064 | return SMP_NO_CONFIG; |
| 1065 | } |
| 1066 | |
| 1067 | /* |
| 1068 | * Should not be necessary because the MP table should list the boot |
| 1069 | * CPU too, but we do it for the sake of robustness anyway. |
| 1070 | */ |
| 1071 | if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) { |
| 1072 | pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n", |
| 1073 | boot_cpu_physical_apicid); |
| 1074 | physid_set(hard_smp_processor_id(), phys_cpu_present_map); |
| 1075 | } |
| 1076 | preempt_enable(); |
| 1077 | |
| 1078 | /* |
| 1079 | * If we couldn't find a local APIC, then get out of here now! |
| 1080 | */ |
| 1081 | if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && |
| 1082 | !cpu_has_apic) { |
| 1083 | if (!disable_apic) { |
| 1084 | pr_err("BIOS bug, local APIC #%d not detected!...\n", |
| 1085 | boot_cpu_physical_apicid); |
| 1086 | pr_err("... forcing use of dummy APIC emulation (tell your hw vendor)\n"); |
| 1087 | } |
| 1088 | return SMP_NO_APIC; |
| 1089 | } |
| 1090 | |
| 1091 | verify_local_APIC(); |
| 1092 | |
| 1093 | /* |
| 1094 | * If SMP should be disabled, then really disable it! |
| 1095 | */ |
| 1096 | if (!max_cpus) { |
| 1097 | pr_info("SMP mode deactivated\n"); |
| 1098 | return SMP_FORCE_UP; |
| 1099 | } |
| 1100 | |
| 1101 | return SMP_OK; |
| 1102 | } |
| 1103 | |
| 1104 | static void __init smp_cpu_index_default(void) |
| 1105 | { |
| 1106 | int i; |
| 1107 | struct cpuinfo_x86 *c; |
| 1108 | |
| 1109 | for_each_possible_cpu(i) { |
| 1110 | c = &cpu_data(i); |
| 1111 | /* mark all to hotplug */ |
| 1112 | c->cpu_index = nr_cpu_ids; |
| 1113 | } |
| 1114 | } |
| 1115 | |
| 1116 | /* |
| 1117 | * Prepare for SMP bootup. The MP table or ACPI has been read |
| 1118 | * earlier. Just do some sanity checking here and enable APIC mode. |
| 1119 | */ |
| 1120 | void __init native_smp_prepare_cpus(unsigned int max_cpus) |
| 1121 | { |
| 1122 | unsigned int i; |
| 1123 | |
| 1124 | smp_cpu_index_default(); |
| 1125 | |
| 1126 | /* |
| 1127 | * Setup boot CPU information |
| 1128 | */ |
| 1129 | smp_store_boot_cpu_info(); /* Final full version of the data */ |
| 1130 | cpumask_copy(cpu_callin_mask, cpumask_of(0)); |
| 1131 | mb(); |
| 1132 | |
| 1133 | current_thread_info()->cpu = 0; /* needed? */ |
| 1134 | for_each_possible_cpu(i) { |
| 1135 | zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); |
| 1136 | zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); |
| 1137 | zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL); |
| 1138 | } |
| 1139 | set_cpu_sibling_map(0); |
| 1140 | |
| 1141 | switch (smp_sanity_check(max_cpus)) { |
| 1142 | case SMP_NO_CONFIG: |
| 1143 | disable_smp(); |
| 1144 | if (APIC_init_uniprocessor()) |
| 1145 | pr_notice("Local APIC not detected. Using dummy APIC emulation.\n"); |
| 1146 | return; |
| 1147 | case SMP_NO_APIC: |
| 1148 | disable_smp(); |
| 1149 | return; |
| 1150 | case SMP_FORCE_UP: |
| 1151 | disable_smp(); |
| 1152 | apic_bsp_setup(false); |
| 1153 | return; |
| 1154 | case SMP_OK: |
| 1155 | break; |
| 1156 | } |
| 1157 | |
| 1158 | default_setup_apic_routing(); |
| 1159 | |
| 1160 | if (read_apic_id() != boot_cpu_physical_apicid) { |
| 1161 | panic("Boot APIC ID in local APIC unexpected (%d vs %d)", |
| 1162 | read_apic_id(), boot_cpu_physical_apicid); |
| 1163 | /* Or can we switch back to PIC here? */ |
| 1164 | } |
| 1165 | |
| 1166 | cpu0_logical_apicid = apic_bsp_setup(false); |
| 1167 | |
| 1168 | pr_info("CPU%d: ", 0); |
| 1169 | print_cpu_info(&cpu_data(0)); |
| 1170 | |
| 1171 | if (is_uv_system()) |
| 1172 | uv_system_init(); |
| 1173 | |
| 1174 | set_mtrr_aps_delayed_init(); |
| 1175 | } |
| 1176 | |
| 1177 | void arch_enable_nonboot_cpus_begin(void) |
| 1178 | { |
| 1179 | set_mtrr_aps_delayed_init(); |
| 1180 | } |
| 1181 | |
| 1182 | void arch_enable_nonboot_cpus_end(void) |
| 1183 | { |
| 1184 | mtrr_aps_init(); |
| 1185 | } |
| 1186 | |
| 1187 | /* |
| 1188 | * Early setup to make printk work. |
| 1189 | */ |
| 1190 | void __init native_smp_prepare_boot_cpu(void) |
| 1191 | { |
| 1192 | int me = smp_processor_id(); |
| 1193 | switch_to_new_gdt(me); |
| 1194 | /* already set me in cpu_online_mask in boot_cpu_init() */ |
| 1195 | cpumask_set_cpu(me, cpu_callout_mask); |
| 1196 | per_cpu(cpu_state, me) = CPU_ONLINE; |
| 1197 | } |
| 1198 | |
| 1199 | void __init native_smp_cpus_done(unsigned int max_cpus) |
| 1200 | { |
| 1201 | pr_debug("Boot done\n"); |
| 1202 | |
| 1203 | nmi_selftest(); |
| 1204 | impress_friends(); |
| 1205 | setup_ioapic_dest(); |
| 1206 | mtrr_aps_init(); |
| 1207 | } |
| 1208 | |
| 1209 | static int __initdata setup_possible_cpus = -1; |
| 1210 | static int __init _setup_possible_cpus(char *str) |
| 1211 | { |
| 1212 | get_option(&str, &setup_possible_cpus); |
| 1213 | return 0; |
| 1214 | } |
| 1215 | early_param("possible_cpus", _setup_possible_cpus); |
| 1216 | |
| 1217 | |
| 1218 | /* |
| 1219 |  * cpu_possible_mask should be static: it cannot change as CPUs |
| 1220 |  * are onlined or offlined. The reason is that per-cpu data structures |
| 1221 |  * are allocated by some modules at init time, and they don't expect to |
| 1222 |  * do this dynamically on cpu arrival/departure. |
| 1223 |  * cpu_present_mask on the other hand can change dynamically. |
| 1224 |  * If cpu_hotplug is not compiled in, we resort to the current |
| 1225 |  * behaviour, which is cpu_possible == cpu_present. |
| 1226 | * - Ashok Raj |
| 1227 | * |
| 1228 | * Three ways to find out the number of additional hotplug CPUs: |
| 1229 | * - If the BIOS specified disabled CPUs in ACPI/mptables use that. |
| 1230 |  * - The user can override it with possible_cpus=NUM |
| 1231 | * - Otherwise don't reserve additional CPUs. |
| 1232 | * We do this because additional CPUs waste a lot of memory. |
| 1233 | * -AK |
| 1234 | */ |
| 1235 | __init void prefill_possible_map(void) |
| 1236 | { |
| 1237 | int i, possible; |
| 1238 | |
| 1239 | /* no processor from mptable or madt */ |
| 1240 | if (!num_processors) |
| 1241 | num_processors = 1; |
| 1242 | |
| 1243 | i = setup_max_cpus ?: 1; |
| 1244 | if (setup_possible_cpus == -1) { |
| 1245 | possible = num_processors; |
| 1246 | #ifdef CONFIG_HOTPLUG_CPU |
| 1247 | if (setup_max_cpus) |
| 1248 | possible += disabled_cpus; |
| 1249 | #else |
| 1250 | if (possible > i) |
| 1251 | possible = i; |
| 1252 | #endif |
| 1253 | } else |
| 1254 | possible = setup_possible_cpus; |
| 1255 | |
| 1256 | total_cpus = max_t(int, possible, num_processors + disabled_cpus); |
| 1257 | |
| 1258 | /* nr_cpu_ids could be reduced via nr_cpus= */ |
| 1259 | if (possible > nr_cpu_ids) { |
| 1260 | pr_warn("%d Processors exceeds NR_CPUS limit of %d\n", |
| 1261 | possible, nr_cpu_ids); |
| 1262 | possible = nr_cpu_ids; |
| 1263 | } |
| 1264 | |
| 1265 | #ifdef CONFIG_HOTPLUG_CPU |
| 1266 | if (!setup_max_cpus) |
| 1267 | #endif |
| 1268 | if (possible > i) { |
| 1269 | pr_warn("%d Processors exceeds max_cpus limit of %u\n", |
| 1270 | possible, setup_max_cpus); |
| 1271 | possible = i; |
| 1272 | } |
| 1273 | |
| 1274 | pr_info("Allowing %d CPUs, %d hotplug CPUs\n", |
| 1275 | possible, max_t(int, possible - num_processors, 0)); |
| 1276 | |
| 1277 | for (i = 0; i < possible; i++) |
| 1278 | set_cpu_possible(i, true); |
| 1279 | for (; i < NR_CPUS; i++) |
| 1280 | set_cpu_possible(i, false); |
| 1281 | |
| 1282 | nr_cpu_ids = possible; |
| 1283 | } |
| 1284 | |
| 1285 | #ifdef CONFIG_HOTPLUG_CPU |
| 1286 | |
| 1287 | static void remove_siblinginfo(int cpu) |
| 1288 | { |
| 1289 | int sibling; |
| 1290 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
| 1291 | |
| 1292 | for_each_cpu(sibling, cpu_core_mask(cpu)) { |
| 1293 | cpumask_clear_cpu(cpu, cpu_core_mask(sibling)); |
| 1294 | /* |
| 1295 | * last thread sibling in this cpu core going down |
| 1296 | */ |
| 1297 | if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) |
| 1298 | cpu_data(sibling).booted_cores--; |
| 1299 | } |
| 1300 | |
| 1301 | for_each_cpu(sibling, cpu_sibling_mask(cpu)) |
| 1302 | cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling)); |
| 1303 | for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) |
| 1304 | cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling)); |
| 1305 | cpumask_clear(cpu_llc_shared_mask(cpu)); |
| 1306 | cpumask_clear(cpu_sibling_mask(cpu)); |
| 1307 | cpumask_clear(cpu_core_mask(cpu)); |
| 1308 | c->phys_proc_id = 0; |
| 1309 | c->cpu_core_id = 0; |
| 1310 | cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); |
| 1311 | } |
| 1312 | |
| 1313 | static void __ref remove_cpu_from_maps(int cpu) |
| 1314 | { |
| 1315 | set_cpu_online(cpu, false); |
| 1316 | cpumask_clear_cpu(cpu, cpu_callout_mask); |
| 1317 | cpumask_clear_cpu(cpu, cpu_callin_mask); |
| 1318 | /* was set by cpu_init() */ |
| 1319 | cpumask_clear_cpu(cpu, cpu_initialized_mask); |
| 1320 | numa_remove_cpu(cpu); |
| 1321 | } |
| 1322 | |
| 1323 | static DEFINE_PER_CPU(struct completion, die_complete); |
| 1324 | |
| 1325 | void cpu_disable_common(void) |
| 1326 | { |
| 1327 | int cpu = smp_processor_id(); |
| 1328 | |
| 1329 | init_completion(&per_cpu(die_complete, smp_processor_id())); |
| 1330 | |
| 1331 | remove_siblinginfo(cpu); |
| 1332 | |
| 1333 | /* It's now safe to remove this processor from the online map */ |
| 1334 | lock_vector_lock(); |
| 1335 | remove_cpu_from_maps(cpu); |
| 1336 | unlock_vector_lock(); |
| 1337 | fixup_irqs(); |
| 1338 | } |
| 1339 | |
| 1340 | int native_cpu_disable(void) |
| 1341 | { |
| 1342 | int ret; |
| 1343 | |
| 1344 | ret = check_irq_vectors_for_cpu_disable(); |
| 1345 | if (ret) |
| 1346 | return ret; |
| 1347 | |
| 1348 | clear_local_APIC(); |
| 1349 | cpu_disable_common(); |
| 1350 | |
| 1351 | return 0; |
| 1352 | } |
| 1353 | |
| 1354 | void cpu_die_common(unsigned int cpu) |
| 1355 | { |
| 1356 | wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ); |
| 1357 | } |
| 1358 | |
| 1359 | void native_cpu_die(unsigned int cpu) |
| 1360 | { |
| 1361 | /* We don't do anything here: idle task is faking death itself. */ |
| 1362 | |
| 1363 | cpu_die_common(cpu); |
| 1364 | |
| 1365 | /* They ack this in play_dead() by setting CPU_DEAD */ |
| 1366 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) { |
| 1367 | if (system_state == SYSTEM_RUNNING) |
| 1368 | pr_info("CPU %u is now offline\n", cpu); |
| 1369 | } else { |
| 1370 | pr_err("CPU %u didn't die...\n", cpu); |
| 1371 | } |
| 1372 | } |
| 1373 | |
| 1374 | void play_dead_common(void) |
| 1375 | { |
| 1376 | idle_task_exit(); |
| 1377 | reset_lazy_tlbstate(); |
| 1378 | amd_e400_remove_cpu(raw_smp_processor_id()); |
| 1379 | |
| 1380 | mb(); |
| 1381 | /* Ack it */ |
| 1382 | __this_cpu_write(cpu_state, CPU_DEAD); |
| 1383 | complete(&per_cpu(die_complete, smp_processor_id())); |
| 1384 | |
| 1385 | /* |
| 1386 | * With physical CPU hotplug, we should halt the cpu |
| 1387 | */ |
| 1388 | local_irq_disable(); |
| 1389 | } |
| 1390 | |
| 1391 | static bool wakeup_cpu0(void) |
| 1392 | { |
| 1393 | if (smp_processor_id() == 0 && enable_start_cpu0) |
| 1394 | return true; |
| 1395 | |
| 1396 | return false; |
| 1397 | } |
| 1398 | |
| 1399 | /* |
| 1400 | * We need to flush the caches before going to sleep, lest we have |
| 1401 | * dirty data in our caches when we come back up. |
| 1402 | */ |
| 1403 | static inline void mwait_play_dead(void) |
| 1404 | { |
| 1405 | unsigned int eax, ebx, ecx, edx; |
| 1406 | unsigned int highest_cstate = 0; |
| 1407 | unsigned int highest_subcstate = 0; |
| 1408 | void *mwait_ptr; |
| 1409 | int i; |
| 1410 | |
| 1411 | if (!this_cpu_has(X86_FEATURE_MWAIT)) |
| 1412 | return; |
| 1413 | if (!this_cpu_has(X86_FEATURE_CLFLUSH)) |
| 1414 | return; |
| 1415 | if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF) |
| 1416 | return; |
| 1417 | |
| 1418 | eax = CPUID_MWAIT_LEAF; |
| 1419 | ecx = 0; |
| 1420 | native_cpuid(&eax, &ebx, &ecx, &edx); |
| 1421 | |
| 1422 | /* |
| 1423 |  * eax is left 0 if the EDX enumeration is not valid; otherwise it is |
| 1424 |  * set below to the (cstate, sub_cstate) of the deepest supported state. |
| 1425 | */ |
| 1426 | if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) { |
| 1427 | eax = 0; |
| 1428 | } else { |
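|  | /* |
|  |  * Each MWAIT_SUBSTATE_SIZE-bit field of EDX counts the sub-states |
|  |  * of one C-state; skip C0 and then remember the deepest C-state |
|  |  * that has any sub-state populated. |
|  |  */ |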
| 1429 | edx >>= MWAIT_SUBSTATE_SIZE; |
| 1430 | for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { |
| 1431 | if (edx & MWAIT_SUBSTATE_MASK) { |
| 1432 | highest_cstate = i; |
| 1433 | highest_subcstate = edx & MWAIT_SUBSTATE_MASK; |
| 1434 | } |
| 1435 | } |
| 1436 | eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | |
| 1437 | (highest_subcstate - 1); |
| 1438 | } |
| 1439 | |
| 1440 | /* |
| 1441 | * This should be a memory location in a cache line which is |
| 1442 | * unlikely to be touched by other processors. The actual |
| 1443 | * content is immaterial as it is not actually modified in any way. |
| 1444 | */ |
| 1445 | mwait_ptr = &current_thread_info()->flags; |
| 1446 | |
| 1447 | wbinvd(); |
| 1448 | |
| 1449 | while (1) { |
| 1450 | /* |
| 1451 | * The CLFLUSH is a workaround for erratum AAI65 for |
| 1452 | * the Xeon 7400 series. It's not clear it is actually |
| 1453 | * needed, but it should be harmless in either case. |
| 1454 | * The WBINVD is insufficient due to the spurious-wakeup |
| 1455 | * case where we return around the loop. |
| 1456 | */ |
| 1457 | mb(); |
| 1458 | clflush(mwait_ptr); |
| 1459 | mb(); |
| 1460 | __monitor(mwait_ptr, 0, 0); |
| 1461 | mb(); |
| 1462 | __mwait(eax, 0); |
| 1463 | /* |
| 1464 | * If NMI wants to wake up CPU0, start CPU0. |
| 1465 | */ |
| 1466 | if (wakeup_cpu0()) |
| 1467 | start_cpu0(); |
| 1468 | } |
| 1469 | } |
| 1470 | |
| 1471 | static inline void hlt_play_dead(void) |
| 1472 | { |
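|  | /* WBINVD exists from the 486 onwards; flush caches before halting. */ |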
| 1473 | if (__this_cpu_read(cpu_info.x86) >= 4) |
| 1474 | wbinvd(); |
| 1475 | |
| 1476 | while (1) { |
| 1477 | native_halt(); |
| 1478 | /* |
| 1479 | * If NMI wants to wake up CPU0, start CPU0. |
| 1480 | */ |
| 1481 | if (wakeup_cpu0()) |
| 1482 | start_cpu0(); |
| 1483 | } |
| 1484 | } |
| 1485 | |
| 1486 | void native_play_dead(void) |
| 1487 | { |
| 1488 | play_dead_common(); |
| 1489 | tboot_shutdown(TB_SHUTDOWN_WFS); |
| 1490 | |
| 1491 | mwait_play_dead(); /* Only returns on failure */ |
| 1492 | if (cpuidle_play_dead()) |
| 1493 | hlt_play_dead(); |
| 1494 | } |
| 1495 | |
| 1496 | #else /* ... !CONFIG_HOTPLUG_CPU */ |
| 1497 | int native_cpu_disable(void) |
| 1498 | { |
| 1499 | return -ENOSYS; |
| 1500 | } |
| 1501 | |
| 1502 | void native_cpu_die(unsigned int cpu) |
| 1503 | { |
| 1504 | /* We said "no" in __cpu_disable */ |
| 1505 | BUG(); |
| 1506 | } |
| 1507 | |
| 1508 | void native_play_dead(void) |
| 1509 | { |
| 1510 | BUG(); |
| 1511 | } |
| 1512 | |
| 1513 | #endif |