/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_iommu.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/efi.h>
#include <linux/personality.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/psci.h>
#include <asm/efi.h>
#include <asm/virt.h>
#include <asm/xen/hypervisor.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
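/*
 * Default hwcaps reported to 32-bit (compat) tasks: the baseline set of
 * AArch32 features the kernel assumes any ARMv8 CPU provides.
 */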
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

/*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
u64 __cacheline_aligned boot_args[4];

void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB positions to determine how many
		 * bits are required to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32-bit value space to a compact
	 * set of values. This is equivalent to hashing the MPIDR_EL1
	 * through shifting and ORing. It is a collision-free hash though
	 * not minimal since some levels might contain a number of CPUs
	 * that is not an exact power of 2 and their bit representation
	 * might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
				  (bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
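	/*
	 * Worked example (hypothetical topology, not from any specific
	 * SoC): with mask == 0xf03, i.e. two Aff0 bits and four Aff1
	 * bits toggling, fs = {0, 0, 0, 0} and bits = {2, 4, 0, 0},
	 * giving shift_aff = {0, 6, 10, 26}. An MPIDR of 0x103 then
	 * hashes to (0x3 >> 0) | (0x100 >> 6) = 0x7 within a 6-bit
	 * index space.
	 */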
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		 mpidr_hash.shift_aff[0],
		 mpidr_hash.shift_aff[1],
		 mpidr_hash.shift_aff[2],
		 mpidr_hash.shift_aff[3],
		 mpidr_hash.mask,
		 mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
#endif

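/*
 * Report which exception level the CPUs booted at; a mix of EL1 and EL2
 * is out of spec, so warn and taint the kernel in that case.
 */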
static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init do_post_cpus_up_work(void)
{
	hyp_mode_check();
	apply_alternatives_all();
}

#ifdef CONFIG_UP_LATE_INIT
void __init up_late_init(void)
{
	do_post_cpus_up_work();
}
#endif /* CONFIG_UP_LATE_INIT */

static void __init setup_processor(void)
{
	u64 features, block;
	u32 cwg;
	int cls;

	printk("CPU: AArch64 Processor [%08x] revision %d\n",
	       read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, ELF_PLATFORM);
	elf_hwcap = 0;

	cpuinfo_store_boot_cpu();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
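	/*
	 * Each non-negative value implies everything below it, e.g. a
	 * value of 2 in the AES field (bits [7:4]) means both PMULL and
	 * AES are implemented, hence the deliberate switch fall-through.
	 */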
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
		case 1:
			elf_hwcap |= HWCAP_AES;
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_CRC32;

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries similar information, but pertaining to
	 * the AArch32 execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}

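/*
 * Map the device tree blob via the fixmap and run the early flat-DT
 * scan. A missing or malformed blob is fatal at this point, so print a
 * diagnostic and spin.
 */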
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	void *dt_virt = fixmap_remap_fdt(dt_phys);

	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("\n"
			"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
			"\nPlease check your bootloader.",
			&dt_phys, dt_virt);

		while (true)
			cpu_relax();
	}

	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}

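/*
 * Claim the kernel text/data and every memblock RAM region in the iomem
 * resource tree, nesting the kernel resources inside the "System RAM"
 * region that contains them (visible in /proc/iomem).
 */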
static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end   = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end   = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}

u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_fixmap_init();
	early_ioremap_init();

	setup_machine_fdt(__fdt_pointer);

	parse_early_param();

	/*
	 * Unmask asynchronous aborts after bringing up possible earlycon,
	 * so that any System Errors that occur are reported as soon as we
	 * are able to report them.
	 */
	local_async_enable();

	efi_init();
	arm64_memblock_init();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	paging_init();
	request_standard_resources();

	early_ioremap_reset();

	if (acpi_disabled) {
		unflatten_device_tree();
		psci_dt_init();
	} else {
		psci_acpi_init();
	}
	xen_early_init();

	cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
	smp_init_cpus();
	smp_build_mpidr_hash();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
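	/*
	 * The arm64 boot protocol reserves x1-x3 for future use and
	 * requires them to be zero on entry, so complain if a
	 * bootloader handed us anything else.
	 */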
	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}

static int __init arm64_device_init(void)
{
	of_iommu_init();
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
}
arch_initcall_sync(arm64_device_init);

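/*
 * Register each possible CPU with the core topology code so it appears
 * under /sys/devices/system/cpu; marking it hotpluggable exposes the
 * per-cpu "online" control file.
 */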
static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
		cpu->hotpluggable = 1;
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

#ifdef CONFIG_COMPAT
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */

static int c_show(struct seq_file *m, void *v)
{
	int i, j;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
#ifdef CONFIG_SMP
		seq_printf(m, "processor\t: %d\n", i);
#endif

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};