/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
        u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
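/*
 * Clarifying note (added): casting the union's integer member to char yields
 * its least-significant byte, which is 'l' on a little-endian build and 'b'
 * on a big-endian one.  The character is appended to the CPU and ELF platform
 * strings in setup_processor() (e.g. "armv7l" vs "armv7b" as seen in uname -m).
 */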

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
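/*
 * Clarifying note (added): the three "reserved" I/O windows above match the
 * legacy PC parallel-port register ranges (0x3bc, 0x378 and 0x278).  Machines
 * that can never provide those ports set mdesc->reserve_lp0/1/2 so that
 * request_standard_resources() marks the ranges busy and no driver probes them.
 */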

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "7M",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
        return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}
#endif
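/*
 * Clarifying note (added): for CPUs that use the revised CPUID scheme
 * (MIDR bits [19:16] == 0xf), the architecture is inferred from ID_MMFR0:
 * a VMSA field (bits [3:0]) or PMSA field (bits [7:4]) of 3 or more means an
 * ARMv7-class MMU/MPU, while a value of 2 means ARMv6.  Older parts are
 * decoded directly from the MIDR layout handled in the earlier branches.
 */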

int __pure cpu_architecture(void)
{
        BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

        return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* PIPT caches never alias. */
        if (icache_is_pipt())
                return 0;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}
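/*
 * Illustrative example (added; the cache geometry is hypothetical): CCSIDR
 * encodes (log2(words per line) - 2) in bits [2:0] and (number of sets - 1)
 * in bits [27:13].  A 16KB 4-way I-cache with 32-byte lines reports a line
 * size field of 1 and 128 sets, so one way spans 32 * 128 = 4096 bytes,
 * exactly PAGE_SIZE, and does not alias; a 32KB 4-way cache with the same
 * line size spans 8192 bytes per way and is flagged as aliasing.
 */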

static void __init cacheid_init(void)
{
        unsigned int arch = cpu_architecture();

        if (arch == CPU_ARCH_ARMv7M) {
                cacheid = 0;
        } else if (arch >= CPU_ARCH_ARMv6) {
                unsigned int cachetype = read_cpuid_cachetype();
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        arch = CPU_ARCH_ARMv7;
                        cacheid = CACHEID_VIPT_NONALIASING;
                        switch (cachetype & (3 << 14)) {
                        case (1 << 14):
                                cacheid |= CACHEID_ASID_TAGGED;
                                break;
                        case (3 << 14):
                                cacheid |= CACHEID_PIPT;
                                break;
                        }
                } else {
                        arch = CPU_ARCH_ARMv6;
                        if (cachetype & (1 << 23))
                                cacheid = CACHEID_VIPT_ALIASING;
                        else
                                cacheid = CACHEID_VIPT_NONALIASING;
                }
                if (cpu_has_aliasing_icache(arch))
                        cacheid |= CACHEID_VIPT_I_ALIASING;
        } else {
                cacheid = CACHEID_VIVT;
        }

        pr_info("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                icache_is_pipt() ? "PIPT" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
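/*
 * Clarifying note (added): the magic numbers above decode the Cache Type
 * Register.  In the ARMv7 format (CTR[31:29] == 0b100), the L1Ip field in
 * bits [15:14] gives the L1 instruction-cache policy: 0b01 is ASID-tagged
 * VIVT and 0b11 is PIPT; anything else is treated here as VIPT non-aliasing.
 */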

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
        extern void printascii(const char *);
        char buf[256];
        va_list ap;

        va_start(ap, str);
        vsnprintf(buf, sizeof(buf), str, ap);
        va_end(ap);

#ifdef CONFIG_DEBUG_LL
        printascii(buf);
#endif
        printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
        int block;
        u32 isar5;

        if (cpu_architecture() < CPU_ARCH_ARMv7)
                return;

        block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
        if (block >= 2)
                elf_hwcap |= HWCAP_IDIVA;
        if (block >= 1)
                elf_hwcap |= HWCAP_IDIVT;

        /* LPAE implies atomic ldrd/strd instructions */
        block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
        if (block >= 5)
                elf_hwcap |= HWCAP_LPAE;

        /* check for supported v8 Crypto instructions */
        isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

        block = cpuid_feature_extract_field(isar5, 4);
        if (block >= 2)
                elf_hwcap2 |= HWCAP2_PMULL;
        if (block >= 1)
                elf_hwcap2 |= HWCAP2_AES;

        block = cpuid_feature_extract_field(isar5, 8);
        if (block >= 1)
                elf_hwcap2 |= HWCAP2_SHA1;

        block = cpuid_feature_extract_field(isar5, 12);
        if (block >= 1)
                elf_hwcap2 |= HWCAP2_SHA2;

        block = cpuid_feature_extract_field(isar5, 16);
        if (block >= 1)
                elf_hwcap2 |= HWCAP2_CRC32;
}
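/*
 * Clarifying note (added): ID_ISAR5 is decoded in 4-bit fields.  Bits [7:4]
 * cover AES (value 1) and AES plus PMULL (value 2), bits [11:8] SHA1,
 * bits [15:12] SHA2 and bits [19:16] CRC32, which is why the extracted
 * "block" values above are compared against 1 or 2.
 */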

static void __init elf_hwcap_fixup(void)
{
        unsigned id = read_cpuid_id();

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
            ((id >> 20) & 3) == 0) {
                elf_hwcap &= ~HWCAP_TLS;
                return;
        }

        /* Verify if CPUID scheme is implemented */
        if ((id & 0x000f0000) != 0x000f0000)
                return;

        /*
         * If the CPU supports LDREX/STREX and LDREXB/STREXB,
         * avoid advertising SWP; it may not be atomic with
         * multiprocessing cores.
         */
        if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
            (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
             cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
                elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                pr_crit("CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * This only works on resume and secondary cores. For booting on the
         * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
         */
        set_my_cpu_offset(per_cpu_offset(cpu));

        cpu_proc_init();

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC     "r"
#else
#define PLC     "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7\n\t"
        "add    r14, %0, %8\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %9"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
              "I" (offsetof(struct stack, fiq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
#endif
}
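/*
 * Clarifying note (added): the inline assembly above briefly switches the CPU
 * through IRQ, ABT, UND and FIQ modes (with IRQs and FIQs masked) purely to
 * load each mode's banked stack pointer from this CPU's 'stacks' entry, and
 * then drops back to SVC mode, in which the kernel normally runs.
 */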

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
        int i;
        u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
        u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

        cpu_logical_map(0) = cpu;
        for (i = 1; i < nr_cpu_ids; ++i)
                cpu_logical_map(i) = i == cpu ? 0 : i;

        /*
         * Clear __my_cpu_offset on the boot CPU to avoid a hang caused by
         * using a percpu variable early; for example, lockdep will access
         * a percpu variable inside lock_release.
         */
        set_my_cpu_offset(0);

        pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}
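/*
 * Illustrative example (added): if the boot CPU reports MPIDR Aff0 == 2 on a
 * four-CPU system, the map becomes {2, 1, 0, 3}: the booting CPU is always
 * logical CPU 0, while physical CPU 0 stays reachable under logical index 2.
 */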

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *                        level in order to build a linear index from an
 *                        MPIDR value. Resulting algorithm is a collision
 *                        free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
        u32 i, affinity;
        u32 fs[3], bits[3], ls, mask = 0;
        /*
         * Pre-scan the list of MPIDRs and filter out bits that do
         * not contribute to affinity levels, i.e. they never toggle.
         */
        for_each_possible_cpu(i)
                mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
        pr_debug("mask of set bits 0x%x\n", mask);
        /*
         * Find and stash the last and first bit set at all affinity levels to
         * check how many bits are required to represent them.
         */
        for (i = 0; i < 3; i++) {
                affinity = MPIDR_AFFINITY_LEVEL(mask, i);
                /*
                 * Find the MSB and LSB bit positions to determine how many
                 * bits are required to express the affinity level.
                 */
                ls = fls(affinity);
                fs[i] = affinity ? ffs(affinity) - 1 : 0;
                bits[i] = ls - fs[i];
        }
        /*
         * An index can be created from the MPIDR by isolating the
         * significant bits at each affinity level and by shifting
         * them in order to compress the 24-bit value space to a
         * compressed set of values. This is equivalent to hashing
         * the MPIDR through shifting and ORing. It is a collision free
         * hash though not minimal since some levels might contain a number
         * of CPUs that is not an exact power of 2 and their bit
         * representation might contain holes, e.g. MPIDR[7:0] = {0x2, 0x80}.
         */
        mpidr_hash.shift_aff[0] = fs[0];
        mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
        mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
                                                (bits[1] + bits[0]);
        mpidr_hash.mask = mask;
        mpidr_hash.bits = bits[2] + bits[1] + bits[0];
        pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
                mpidr_hash.shift_aff[0],
                mpidr_hash.shift_aff[1],
                mpidr_hash.shift_aff[2],
                mpidr_hash.mask,
                mpidr_hash.bits);
        /*
         * 4x is an arbitrary value used to warn on a hash table much bigger
         * than expected on most systems.
         */
        if (mpidr_hash_size() > 4 * num_possible_cpus())
                pr_warn("Large number of MPIDR hash buckets detected\n");
        sync_cache_w(&mpidr_hash);
}
#endif
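/*
 * Illustrative example (added): with two clusters of two CPUs whose MPIDRs
 * are 0x0, 0x1, 0x100 and 0x101, the XOR pre-scan gives mask = 0x101, so only
 * one bit is significant at Aff0 and one at Aff1.  The computed shifts pack
 * those two bits together and mpidr_hash.bits ends up as 2, i.e. a 4-entry
 * table covers every possible CPU without collisions.
 */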

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
                       read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
                cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
                proc_arch[cpu_architecture()], get_cr());

        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
                 list->arch_name, ENDIANNESS);
        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                 list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;

        cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
        init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
        erratum_a15_798181_init();

        elf_hwcap_fixup();

        cacheid_init();
        cpu_init();
}

void __init dump_machine_table(void)
{
        const struct machine_desc *p;

        early_print("Available machine support:\n\nID (hex)\tNAME\n");
        for_each_machine_desc(p)
                early_print("%08x\t%s\n", p->nr, p->name);

        early_print("\nPlease check your kernel config and/or bootloader.\n");

        while (true)
                /* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
        u64 aligned_start;

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is rounded down, start is rounded up.
         */
        aligned_start = PAGE_ALIGN(start);
        if (aligned_start > start + size)
                size = 0;
        else
                size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
        if (aligned_start > ULONG_MAX) {
                pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
                        (long long)start);
                return -EINVAL;
        }

        if (aligned_start + size > ULONG_MAX) {
                pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
                        (long long)start);
                /*
                 * To ensure bank->start + bank->size is representable in
                 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
                 * This means we lose a page after masking.
                 */
                size = ULONG_MAX - aligned_start;
        }
#endif

        if (aligned_start < PHYS_OFFSET) {
                if (aligned_start + size <= PHYS_OFFSET) {
                        pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
                                aligned_start, aligned_start + size);
                        return -EINVAL;
                }

                pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
                        aligned_start, (u64)PHYS_OFFSET);

                size -= PHYS_OFFSET - aligned_start;
                aligned_start = PHYS_OFFSET;
        }

        start = aligned_start;
        size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

        /*
         * Check whether this memory region has non-zero size or
         * invalid node number.
         */
        if (size == 0)
                return -EINVAL;

        memblock_add(start, size);
        return 0;
}
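/*
 * Illustrative example (added; addresses are hypothetical):
 * arm_add_memory(0x80000800, 0x200000) rounds the start up to 0x80001000,
 * shrinks the size by the 0x800 bytes skipped, then masks the size down to a
 * whole number of pages, so the block registered with memblock is
 * [0x80001000, 0x80200000).
 */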

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        u64 size;
        u64 start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                memblock_remove(memblock_start_of_DRAM(),
                        memblock_end_of_DRAM() - memblock_start_of_DRAM());
        }

        start = PHYS_OFFSET;
        size = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);
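/*
 * Illustrative example (added): booting with "mem=64M@0x80000000" discards
 * the memory layout handed over by the bootloader (on the first "mem="
 * occurrence) and registers a single 64MB block at physical 0x80000000;
 * without an "@start" part the block starts at PHYS_OFFSET.
 */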

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start   = virt_to_phys(_text);
        kernel_code.end     = virt_to_phys(_etext - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = memblock_virt_alloc(sizeof(*res), 0);
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines don't have the possibility of ever
         * possessing lp0, lp1 or lp2
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
        .orig_video_lines       = 30,
        .orig_video_cols        = 80,
        .orig_video_mode        = 0,
        .orig_video_ega_bx      = 0,
        .orig_video_isVGA       = 1,
        .orig_video_points      = 8
};
#endif

static int __init customize_machine(void)
{
        /*
         * Customizes platform devices, or adds new ones.
         * On DT based machines, we fall back to populating the
         * machine from the device tree, if no callback is provided,
         * otherwise we would always need an init_machine callback.
         */
        of_iommu_init();
        if (machine_desc->init_machine)
                machine_desc->init_machine();
#ifdef CONFIG_OF
        else
                of_platform_populate(NULL, of_default_bus_match_table,
                                        NULL, NULL);
#endif
        return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
        struct device_node *root;
        int ret;

        if (machine_desc->init_late)
                machine_desc->init_late();

        root = of_find_node_by_path("/");
        if (root) {
                ret = of_property_read_string(root, "serial-number",
                                              &system_serial);
                if (ret)
                        system_serial = NULL;
        }

        if (!system_serial)
                system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
                                          system_serial_high,
                                          system_serial_low);

        return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = memblock_reserve(crash_base, crash_size);
        if (ret < 0) {
                pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
                        (unsigned long)crash_base);
                return;
        }

        pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
                (unsigned long)(crash_size >> 20),
                (unsigned long)(crash_base >> 20),
                (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
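/*
 * Illustrative example (added): with "crashkernel=64M@0x30000000" on the
 * command line, parse_crashkernel() returns a 64MB size at base 0x30000000;
 * that range is reserved in memblock and inserted into the iomem resource
 * tree so kexec can later load a dump-capture kernel into it.
 */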

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
        sync_boot_mode();

        if (is_hyp_mode_available()) {
                pr_info("CPU: All CPU(s) started in HYP mode.\n");
                pr_info("CPU: Virtualization extensions available.\n");
        } else if (is_hyp_mode_mismatched()) {
                pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
                        __boot_cpu_mode & MODE_MASK);
                pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
        } else
                pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
        const struct machine_desc *mdesc;

        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
                mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;
        dump_stack_set_arch_desc("%s", mdesc->name);

        if (mdesc->reboot_mode != REBOOT_HARD)
                reboot_mode = mdesc->reboot_mode;

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
                early_fixmap_init();

        parse_early_param();

#ifdef CONFIG_MMU
        early_paging_init(mdesc);
#endif
        setup_dma_zone(mdesc);
        sanity_check_meminfo();
        arm_memblock_init(mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

        if (mdesc->restart)
                arm_pm_restart = mdesc->restart;

        unflatten_device_tree();

        arm_dt_init_cpu_maps();
        psci_dt_init();
        xen_early_init();
#ifdef CONFIG_SMP
        if (is_smp()) {
                if (!mdesc->smp_init || !mdesc->smp_init()) {
                        if (psci_smp_available())
                                smp_set_ops(&psci_smp_ops);
                        else if (mdesc->smp)
                                smp_set_ops(mdesc->smp);
                }
                smp_init_cpus();
                smp_build_mpidr_hash();
        }
#endif

        if (!is_smp())
                hyp_mode_check();

        reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        if (mdesc->init_early)
                mdesc->init_early();
}


static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        "tls",
        "vfpv4",
        "idiva",
        "idivt",
        "vfpd32",
        "lpae",
        "evtstrm",
        NULL
};

static const char *hwcap2_str[] = {
        "aes",
        "pmull",
        "sha1",
        "sha2",
        "crc32",
        NULL
};

static int c_show(struct seq_file *m, void *v)
{
        int i, j;
        u32 cpuid;

        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
                seq_printf(m, "model name\t: %s rev %d (%s)\n",
                           cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           loops_per_jiffy / (500000/HZ),
                           (loops_per_jiffy / (5000/HZ)) % 100);
#endif
                /* dump out the processor features */
                seq_puts(m, "Features\t: ");

                for (j = 0; hwcap_str[j]; j++)
                        if (elf_hwcap & (1 << j))
                                seq_printf(m, "%s ", hwcap_str[j]);

                for (j = 0; hwcap2_str[j]; j++)
                        if (elf_hwcap2 & (1 << j))
                                seq_printf(m, "%s ", hwcap2_str[j]);

                seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
                seq_printf(m, "CPU architecture: %s\n",
                           proc_arch[cpu_architecture()]);

                if ((cpuid & 0x0008f000) == 0x00000000) {
                        /* pre-ARM7 */
                        seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
                } else {
                        if ((cpuid & 0x0008f000) == 0x00007000) {
                                /* ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%02x\n",
                                           (cpuid >> 16) & 127);
                        } else {
                                /* post-ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%x\n",
                                           (cpuid >> 20) & 15);
                        }
                        seq_printf(m, "CPU part\t: 0x%03x\n",
                                   (cpuid >> 4) & 0xfff);
                }
                seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
        }

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %s\n", system_serial);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};