/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

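/*
 * Small per-mode exception stacks: three words per mode should be
 * enough, since the IRQ/ABT/UND/FIQ vector stubs stash only a little
 * state here before switching to SVC mode (see entry-armv.S).
 */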
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

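/*
 * Run-time endianness test: 'l' sits in the lowest-addressed byte of
 * the word and 'b' in the highest, so casting the word to char yields
 * 'l' on a little-endian kernel and 'b' on a big-endian one.
 */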
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

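	/*
	 * Decode the Main ID register: pre-CPUID ARM7 parts flag Thumb
	 * support via bit 23; later parts encode the architecture in
	 * bits [19:16], with 0xf meaning "revised CPUID scheme", in
	 * which case the memory model feature registers decide.
	 */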
	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

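/*
 * Classify the CPU's caches (VIVT, VIPT aliasing/non-aliasing, PIPT)
 * from the Cache Type Register; CTR bits [31:29] == 0b100 select the
 * ARMv7 register layout. The result is cached in 'cacheid' for use by
 * the cache maintenance code.
 */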
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

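/*
 * If the CPU has hardware divide, overwrite the first eight bytes of
 * the EABI software-division helpers with "udiv/sdiv r0, r0, r1"
 * followed by "bx lr". Clearing bit 0 of the function address strips
 * the Thumb flag so the actual instruction bytes get patched.
 */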
static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

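	/*
	 * ID_ISAR5 fields: AES[7:4] (a value of 2 also implies PMULL),
	 * SHA1[11:8], SHA2[15:12], CRC32[19:16].
	 */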
	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
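	/*
	 * The asm below enters each of IRQ, ABT, UND and FIQ mode in
	 * turn (with IRQs and FIQs masked), points sp at that mode's
	 * slot in the per-cpu struct stack, and finishes back in SVC
	 * mode. r14 is used as scratch, hence the clobber.
	 */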
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

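	/*
	 * Make the booting CPU logical CPU 0 and swap its old slot, so
	 * that every physical id still appears exactly once in the map.
	 */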
	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
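	/*
	 * Worked example with 4K pages: start=0x80000400, size=0x2000
	 * yields aligned_start=0x80001000 and size=0x1400; the final
	 * page masking below then trims size to 0x1000.
	 */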
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Reject regions that ended up with zero size after the
	 * page alignment above.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
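/* e.g. "mem=512M@0x20000000" overrides detected RAM with 512MB at 0x20000000 */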

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Only reserve the legacy parallel port regions (lp0, lp1, lp2)
	 * on machines that can actually have them.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * Customizes platform devices, or adds new ones.
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree if no callback is provided;
	 * otherwise we would always need an init_machine callback.
	 */
	of_iommu_init();
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel="
 * kernel command line parameter. The reserved memory is used by a
 * dump capture kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_paging_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	efi_init();
	sanity_check_meminfo();
	arm_memblock_init(mdesc);

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
	xen_early_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

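		/* BogoMIPS = loops_per_jiffy * HZ / 500000, shown as X.YY */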
#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};