/*
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/nmi.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
int spinning_secondaries;
/* Pick defaults since we might want to patch instructions
 * before we've read this from the device tree.
 */
struct ppc64_caches ppc64_caches = {
	.dline_size = 0x40,
	.log_dline_size = 6,
	.iline_size = 0x40,
	.log_iline_size = 6
};
EXPORT_SYMBOL_GPL(ppc64_caches);
/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;
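/*
 * Usage note (an assumption about asm/elf.h, not shown in this file):
 * these values reach userland as the AT_DCACHEBSIZE, AT_ICACHEBSIZE and
 * AT_UCACHEBSIZE aux vector entries via ARCH_DLINFO, e.g. so libc can
 * size its cache-block-granular string/copy loops.
 */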
#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);
	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);
		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;
		paca[cpu].tcd_ptr = &paca[first].tcd;
		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 */
		if (smt_enabled_at_boot >= 2 &&
		    !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
		    book3e_htw_mode != PPC_HTW_E6500) {
			/* Should we panic instead? */
			WARN_ONCE(1, "%s: unsupported MMU configuration -- expect problems\n",
				  __func__);
		}
	}
}
#endif
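/*
 * Illustrative sketch (hypothetical values, not from this file): on a
 * 4-thread e6500 core, cpus 0..3 are siblings, so the loop above leaves
 *
 *	paca[1].tcd_ptr == paca[2].tcd_ptr == paca[3].tcd_ptr == &paca[0].tcd
 *
 * i.e. every thread sharing the TLB serializes on one tlb_core_data lock,
 * which the BUILD_BUG_ON above pins at offset 0.
 */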
#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;
/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;
	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;
	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);
			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot =
						threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}
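/*
 * Worked example (hypothetical values): on an 8-thread core,
 * "smt-enabled=2" yields min(8, 2) = 2, "smt-enabled=on" yields 8, and
 * with no command line option an "ibm,smt-enabled" = "off" property
 * under /options yields 0; the command line always wins over the
 * device tree.
 */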
/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);
#endif /* CONFIG_SMP */
/** Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we setup percpu data */
	get_paca()->data_offset = 0;
}
static void __init configure_exceptions(void)
{
	/*
	 * Setup the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();
	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		pseries_enable_reloc_on_exc();

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* Enable AIL if supported, and we are in hypervisor mode */
		if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
		    early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
			unsigned long lpcr = mfspr(SPRN_LPCR);
			mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
		}
	}
}
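/*
 * Note (ISA background, not stated in this file): with LPCR_AIL_3 set,
 * interrupts are delivered with relocation on at effective address
 * 0xc000000000004000 | vector, rather than in real mode at the vector
 * itself, which is what lets the handlers run with the MMU enabled.
 */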
static void cpu_ready_for_interrupts(void)
{
	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}
/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * on.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended; be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */
void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;
	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Identify CPU type */
	identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();
	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));
	/* Now we know the logical id of our boot cpu, setup the paca. */
	setup_paca(&paca[boot_cpuid]);
	fixup_boot_paca();
	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();
	/* Apply all the dynamic patching */
	apply_feature_fixups();
	/* Initialize the hash table or TLB handling */
	early_init_mmu();
	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists
	 */
	cpu_ready_for_interrupts();
	DBG(" <- early_setup()\n");
#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above DBG() even)
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}
#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	get_paca()->soft_enabled = 0;
	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}
#endif /* CONFIG_SMP */
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
static bool use_spinloop(void)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3E))
		return true;

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}
void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	DBG(" -> smp_release_cpus()\n");
	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */
	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);
	mb();
	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);
	DBG(" <- smp_release_cpus()\n");
}
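/*
 * Timing note (derived from the loop above): 100000 iterations of
 * udelay(1) bound the wait at roughly 100ms before we give up and
 * report any still-spinning secondaries via the DBG() above.
 */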
#endif /* CONFIG_SMP || CONFIG_KEXEC */
/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland.
 */
void __init initialize_cache_info(void)
{
	struct device_node *np;
	unsigned long num_cpus = 0;
	DBG(" -> initialize_cache_info()\n");
	for_each_node_by_type(np, "cpu") {
		num_cpus += 1;

		/*
		 * We're assuming *all* of the CPUs have the same
		 * d-cache and i-cache sizes... -Peter
		 */
		if (num_cpus == 1) {
			const __be32 *sizep, *lsizep;
			u32 size, lsize;
			size = 0;
			lsize = cur_cpu_spec->dcache_bsize;
			sizep = of_get_property(np, "d-cache-size", NULL);
			if (sizep != NULL)
				size = be32_to_cpu(*sizep);
			lsizep = of_get_property(np, "d-cache-block-size",
						 NULL);
			/* fallback if block size missing */
			if (lsizep == NULL)
				lsizep = of_get_property(np,
							 "d-cache-line-size",
							 NULL);
			if (lsizep != NULL)
				lsize = be32_to_cpu(*lsizep);
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find dcache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);
			ppc64_caches.dsize = size;
			ppc64_caches.dline_size = lsize;
			ppc64_caches.log_dline_size = __ilog2(lsize);
			ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;
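			/*
			 * Worked example (hypothetical values): 128-byte
			 * d-cache blocks and 64K pages give
			 * log_dline_size = __ilog2(128) = 7 and
			 * dlines_per_page = 65536 / 128 = 512.
			 */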
			size = 0;
			lsize = cur_cpu_spec->icache_bsize;
			sizep = of_get_property(np, "i-cache-size", NULL);
			if (sizep != NULL)
				size = be32_to_cpu(*sizep);
			lsizep = of_get_property(np, "i-cache-block-size",
						 NULL);
			/* fallback if block size missing */
			if (lsizep == NULL)
				lsizep = of_get_property(np,
							 "i-cache-line-size",
							 NULL);
			if (lsizep != NULL)
				lsize = be32_to_cpu(*lsizep);
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find icache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);
			ppc64_caches.isize = size;
			ppc64_caches.iline_size = lsize;
			ppc64_caches.log_iline_size = __ilog2(lsize);
			ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
		}
	}
	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.dline_size;
	icache_bsize = ppc64_caches.iline_size;
	DBG(" <- initialize_cache_info()\n");
}
/* This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause a TLB or SLB miss. This is
 * used to allocate interrupt or emergency stacks for which our
 * exception entry path doesn't deal with being interrupted.
 */
static __init u64 safe_stack_limit(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS, the first segment is bolted */
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}
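/*
 * Worked example (assuming the usual ppc64 values SID_SHIFT = 28 and
 * SID_SHIFT_1T = 40): the bolted first segment is 1UL << 28 = 256MB
 * with 256MB segments, or 1UL << 40 = 1TB when 1T segments are
 * supported.
 */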
void __init irqstack_early_init(void)
{
	u64 limit = safe_stack_limit();
	unsigned int i;
	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
					    THREAD_SIZE, limit));
		hardirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
					    THREAD_SIZE, limit));
	}
}
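/*
 * Note (an inference from the code above): memblock_alloc_base() hands
 * back a THREAD_SIZE-aligned physical block below 'limit', so after
 * __va() these stacks live in the bolted part of the linear mapping and
 * can never take an SLB miss.
 */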
#ifdef CONFIG_PPC_BOOK3E
void __init exc_lvl_early_init(void)
{
	unsigned int i;
	unsigned long sp;
	for_each_possible_cpu(i) {
		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		critirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].crit_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		dbgirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].dbg_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		mcheckirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].mc_kstack = __va(sp + THREAD_SIZE);
	}
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif
/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;
	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
	 */
	limit = min(safe_stack_limit(), ppc64_rma_size);
	for_each_possible_cpu(i) {
		struct thread_info *ti;
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		klp_init_thread_info(ti);
		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for machine check exception handling. */
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		klp_init_thread_info(ti);
		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
	}
}
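/*
 * Note (an inference, not in the original): powerpc stacks grow down,
 * so (void *)ti + THREAD_SIZE points just past the allocation and is
 * the initial stack pointer; the thread_info lives at the bottom.
 */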
#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
				    __pa(MAX_DMA_ADDRESS));
}
static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}
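/*
 * Hedged example (assuming the generic LOCAL_DISTANCE = 10 and
 * REMOTE_DISTANCE = 20 from <linux/topology.h>): two cpus on the same
 * NUMA node report 10, so pcpu_embed_first_chunk() below can group
 * their per-cpu units into one allocation; cross-node pairs report 20
 * and get kept apart.
 */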
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;
	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;
	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca[cpu].data_offset = __per_cpu_offset[cpu];
	}
}
#endif
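/*
 * Usage note (an assumption about the powerpc percpu accessors): once
 * paca[cpu].data_offset is set, per_cpu(var, cpu) resolves to
 * &var + __per_cpu_offset[cpu], and __my_cpu_offset reads
 * local_paca->data_offset, which is why the paca copy above must stay
 * in sync with the array.
 */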
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif
#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}
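/*
 * Worked example (hypothetical frequency): with ppc_proc_freq at 4GHz
 * and the default watchdog_thresh of 10 seconds, the PMU sample period
 * is 4000000000 * 10 = 4 * 10^10 cycles between NMI samples.
 */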
/*
 * The hardlockup detector breaks PMU event based branches and is likely
 * to get false positives in KVM guests, so disable it by default.
 */
static int __init disable_hardlockup_detector(void)
{
	hardlockup_detector_disable();

	return 0;
}
early_initcall(disable_hardlockup_detector);
#endif