ARM: Add base support for ARMv7-M
arch/arm/kernel/setup.c
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"
#include "tcm.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
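
/*
 * ENDIANNESS evaluates to 'l' or 'b': endian_test overlays the bytes
 * 'l', '?', '?', 'b' with a 32-bit unsigned long, and the (char) cast
 * picks out the least significant byte, i.e. c[0] ('l') on a
 * little-endian CPU and c[3] ('b') on a big-endian one.  The character
 * is appended to the utsname machine string and to elf_platform in
 * setup_processor().
 */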

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "7M",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
        return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}
#endif
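
/*
 * For cores using the "revised CPUID format" (architecture field of the
 * main ID register == 0xf), ID_MMFR0 bits [3:0] describe VMSA support and
 * bits [7:4] PMSA support: a value of 3 or more in either field means a
 * VMSAv7/PMSAv7 (i.e. ARMv7-A/R class) memory system, while 2 means the
 * ARMv6 equivalents.  ARMv7-M parts have no CP15 at all, which is why
 * CONFIG_CPU_V7M builds simply hard-code CPU_ARCH_ARMv7M above.
 */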

int __pure cpu_architecture(void)
{
        BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

        return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* PIPT caches never alias. */
        if (icache_is_pipt())
                return 0;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}
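
/*
 * The ARMv7 case above computes the size of one instruction-cache way
 * from CCSIDR (line size in bytes times the number of sets).  If a way
 * is larger than a page, virtual address bits above the page offset take
 * part in set selection, so two virtual mappings of the same physical
 * page can index different sets, i.e. the VIPT I-cache can alias.
 */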

static void __init cacheid_init(void)
{
        unsigned int arch = cpu_architecture();

        if (arch == CPU_ARCH_ARMv7M) {
                cacheid = 0;
        } else if (arch >= CPU_ARCH_ARMv6) {
                unsigned int cachetype = read_cpuid_cachetype();
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        arch = CPU_ARCH_ARMv7;
                        cacheid = CACHEID_VIPT_NONALIASING;
                        switch (cachetype & (3 << 14)) {
                        case (1 << 14):
                                cacheid |= CACHEID_ASID_TAGGED;
                                break;
                        case (3 << 14):
                                cacheid |= CACHEID_PIPT;
                                break;
                        }
                } else {
                        arch = CPU_ARCH_ARMv6;
                        if (cachetype & (1 << 23))
                                cacheid = CACHEID_VIPT_ALIASING;
                        else
                                cacheid = CACHEID_VIPT_NONALIASING;
                }
                if (cpu_has_aliasing_icache(arch))
                        cacheid |= CACHEID_VIPT_I_ALIASING;
        } else {
                cacheid = CACHEID_VIVT;
        }

        printk("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                icache_is_pipt() ? "PIPT" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
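
/*
 * ARMv7-M has no CP15 coprocessor interface, so the cache type register
 * read above is not available there; cacheid is simply left clear and no
 * cache properties are reported.  For ARMv6 and later, bits [31:29] of
 * the cache type register distinguish the old ARMv6 layout from the
 * ARMv7 one decoded above.
 */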

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
        extern void printascii(const char *);
        char buf[256];
        va_list ap;

        va_start(ap, str);
        vsnprintf(buf, sizeof(buf), str, ap);
        va_end(ap);

#ifdef CONFIG_DEBUG_LL
        printascii(buf);
#endif
        printk("%s", buf);
}

static void __init feat_v6_fixup(void)
{
        int id = read_cpuid_id();

        if ((id & 0xff0f0000) != 0x41070000)
                return;

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
                elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * This only works on resume and secondary cores. For booting on the
         * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
         */
        set_my_cpu_offset(per_cpu_offset(cpu));

        cpu_proc_init();

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC "r"
#else
#define PLC "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
#endif
}
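
/*
 * The inline asm above briefly drops into IRQ, ABT and UND mode in turn
 * (msr cpsr_c), points each mode's banked stack pointer at the matching
 * three-word area in this CPU's struct stack, and finally returns to SVC
 * mode.  ARMv7-M has no banked exception modes, so the whole body is
 * compiled out for CONFIG_CPU_V7M.
 */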

int __cpu_logical_map[NR_CPUS];

void __init smp_setup_processor_id(void)
{
        int i;
        u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
        u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

        cpu_logical_map(0) = cpu;
        for (i = 1; i < nr_cpu_ids; ++i)
                cpu_logical_map(i) = i == cpu ? 0 : i;

        printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}
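
/*
 * The booting CPU always becomes logical CPU 0: its affinity level 0
 * field from MPIDR is stored in cpu_logical_map(0), and the logical slot
 * that would otherwise have carried that hardware id gets 0 instead, so
 * the map remains a permutation of 0..nr_cpu_ids-1.
 */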

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
                 list->arch_name, ENDIANNESS);
        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                 list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
#endif

        feat_v6_fixup();

        cacheid_init();
        cpu_init();
}

void __init dump_machine_table(void)
{
        struct machine_desc *p;

        early_print("Available machine support:\n\nID (hex)\tNAME\n");
        for_each_machine_desc(p)
                early_print("%08x\t%s\n", p->nr, p->name);

        early_print("\nPlease check your kernel config and/or bootloader.\n");

        while (true)
                /* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
        struct membank *bank = &meminfo.bank[meminfo.nr_banks];

        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_CRIT "NR_BANKS too low, "
                        "ignoring memory at 0x%08llx\n", (long long)start);
                return -EINVAL;
        }

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;
        bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_LPAE
        if (bank->start + size < bank->start) {
                printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
                        "32-bit physical address space\n", (long long)start);
                /*
                 * To ensure bank->start + bank->size is representable in
                 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
                 * This means we lose a page after masking.
                 */
                size = ULONG_MAX - bank->start;
        }
#endif

        bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

        /*
         * Check whether this memory region has non-zero size or
         * invalid node number.
         */
        if (bank->size == 0)
                return -EINVAL;

        meminfo.nr_banks++;
        return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        phys_addr_t size;
        phys_addr_t start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start   = virt_to_phys(_text);
        kernel_code.end     = virt_to_phys(_etext - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines don't have the possibility of ever
         * possessing lp0, lp1 or lp2
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines      = 30,
 .orig_video_cols       = 80,
 .orig_video_mode       = 0,
 .orig_video_ega_bx     = 0,
 .orig_video_isVGA      = 1,
 .orig_video_points     = 8
};
#endif

static int __init customize_machine(void)
{
        /* customizes platform devices, or adds new ones */
        if (machine_desc->init_machine)
                machine_desc->init_machine();
        return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
        if (machine_desc->init_late)
                machine_desc->init_late();
        return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
        if (ret < 0) {
                printk(KERN_WARNING "crashkernel reservation failed - "
                       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
                return;
        }

        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
               "for crashkernel (System RAM: %ldMB)\n",
               (unsigned long)(crash_size >> 20),
               (unsigned long)(crash_base >> 20),
               (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

static int __init meminfo_cmp(const void *_a, const void *_b)
{
        const struct membank *a = _a, *b = _b;
        long cmp = bank_pfn_start(a) - bank_pfn_start(b);
        return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
        if (is_hyp_mode_available()) {
                pr_info("CPU: All CPU(s) started in HYP mode.\n");
                pr_info("CPU: Virtualization extensions available.\n");
        } else if (is_hyp_mode_mismatched()) {
                pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
                        __boot_cpu_mode & MODE_MASK);
                pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
        } else
                pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
        struct machine_desc *mdesc;

        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
                mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;

        setup_dma_zone(mdesc);

        if (mdesc->restart_mode)
                reboot_setup(&mdesc->restart_mode);

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
        sanity_check_meminfo();
        arm_memblock_init(&meminfo, mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

        if (mdesc->restart)
                arm_pm_restart = mdesc->restart;

        unflatten_device_tree();

        arm_dt_init_cpu_maps();
#ifdef CONFIG_SMP
        if (is_smp()) {
                smp_set_ops(mdesc->smp);
                smp_init_cpus();
        }
#endif

        if (!is_smp())
                hyp_mode_check();

        reserve_crashkernel();

        tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        if (mdesc->init_early)
                mdesc->init_early();
}


static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        "tls",
        "vfpv4",
        "idiva",
        "idivt",
        NULL
};

static int c_show(struct seq_file *m, void *v)
{
        int i, j;
        u32 cpuid;

        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
                seq_printf(m, "model name\t: %s rev %d (%s)\n",
                           cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           loops_per_jiffy / (500000/HZ),
                           (loops_per_jiffy / (5000/HZ)) % 100);
#endif
                /* dump out the processor features */
                seq_puts(m, "Features\t: ");

                for (j = 0; hwcap_str[j]; j++)
                        if (elf_hwcap & (1 << j))
                                seq_printf(m, "%s ", hwcap_str[j]);

                seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
                seq_printf(m, "CPU architecture: %s\n",
                           proc_arch[cpu_architecture()]);

                if ((cpuid & 0x0008f000) == 0x00000000) {
                        /* pre-ARM7 */
                        seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
                } else {
                        if ((cpuid & 0x0008f000) == 0x00007000) {
                                /* ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%02x\n",
                                           (cpuid >> 16) & 127);
                        } else {
                                /* post-ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%x\n",
                                           (cpuid >> 20) & 15);
                        }
                        seq_printf(m, "CPU part\t: 0x%03x\n",
                                   (cpuid >> 4) & 0xfff);
                }
                seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
        }

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};