ARM: 7582/2: rename kvm_seq to vmalloc_seq so to avoid confusion with KVM
[deliverable/linux.git] / arch / arm / kernel / setup.c
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
ecea4ab6 10#include <linux/export.h>
1da177e4
LT
11#include <linux/kernel.h>
12#include <linux/stddef.h>
13#include <linux/ioport.h>
14#include <linux/delay.h>
15#include <linux/utsname.h>
16#include <linux/initrd.h>
17#include <linux/console.h>
18#include <linux/bootmem.h>
19#include <linux/seq_file.h>
894673ee 20#include <linux/screen_info.h>
1da177e4 21#include <linux/init.h>
3c57fb43 22#include <linux/kexec.h>
93c02ab4 23#include <linux/of_fdt.h>
1da177e4
LT
24#include <linux/cpu.h>
25#include <linux/interrupt.h>
7bbb7940 26#include <linux/smp.h>
e119bfff 27#include <linux/proc_fs.h>
2778f620 28#include <linux/memblock.h>
2ecccf90
DM
29#include <linux/bug.h>
30#include <linux/compiler.h>
27a3f0e9 31#include <linux/sort.h>
1da177e4 32
b86040a5 33#include <asm/unified.h>
15d07dc9 34#include <asm/cp15.h>
1da177e4 35#include <asm/cpu.h>
0ba8b9b2 36#include <asm/cputype.h>
1da177e4 37#include <asm/elf.h>
1da177e4 38#include <asm/procinfo.h>
37efe642 39#include <asm/sections.h>
1da177e4 40#include <asm/setup.h>
f00ec48f 41#include <asm/smp_plat.h>
1da177e4
LT
42#include <asm/mach-types.h>
43#include <asm/cacheflush.h>
46097c7d 44#include <asm/cachetype.h>
1da177e4
LT
45#include <asm/tlbflush.h>
46
93c02ab4 47#include <asm/prom.h>
1da177e4
LT
48#include <asm/mach/arch.h>
49#include <asm/mach/irq.h>
50#include <asm/mach/time.h>
9f97da78
DH
51#include <asm/system_info.h>
52#include <asm/system_misc.h>
5cbad0eb 53#include <asm/traps.h>
bff595c1 54#include <asm/unwind.h>
1c16d242 55#include <asm/memblock.h>
4588c34d 56#include <asm/virt.h>
1da177e4 57
4cd9d6f7 58#include "atags.h"
bc581770 59#include "tcm.h"
0fc1c832 60
1da177e4
LT
61
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Floating point emulator name selected with the "fpe=" boot option. */
char fpe_type[8];

/*
 * Parse the "fpe=" command line option by copying its value into
 * fpe_type.  NOTE(review): a fixed 8 bytes are copied regardless of the
 * option's actual length — presumably safe because 'line' points into
 * the larger command-line buffer, but confirm against the __setup
 * handler contract.
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
73
/* Implemented elsewhere in arch/arm (mm/ and platform code). */
extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

/* CPU ID register value, cached for use by asm and proc code. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine type number handed over by the bootloader. */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* Cache topology flags (CACHEID_*), populated by cacheid_init(). */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the boot tags / device tree blob from the bootloader. */
unsigned int __atags_pointer __initdata;

/* Board revision and serial number, reported via /proc/cpuinfo. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* HWCAP_* capability bits advertised to userspace (AT_HWCAP). */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/*
 * When the kernel is built to support several CPU/TLB/user/cache
 * implementations, the method tables matched at boot are copied into
 * these globals by setup_processor().
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif
1da177e4 117
2ecccf90
DM
/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

/* Small per-mode stacks; cpu_init() points the IRQ/ABT/UND banked sp here. */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

/* Platform string exposed to userspace (AT_PLATFORM / /proc/cpuinfo). */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;		/* CPU name from the matched proc_info */
static const char *machine_name;	/* board name from the machine descriptor */
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

/*
 * Runtime endianness probe: the first byte of the long reads back as
 * 'l' on little-endian and 'b' on big-endian machines.
 */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

/* Per-CPU bookkeeping (cpuid, loops_per_jiffy, sysfs cpu device). */
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
145
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Aliases; start/end are filled in by request_standard_resources(). */
#define video_ram mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy PC-style parallel-port I/O ranges, reserved on board request. */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
198
1da177e4
LT
/* Architecture names indexed by CPU_ARCH_* value, for /proc/cpuinfo. */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
218
2ecccf90 219static int __get_cpu_architecture(void)
1da177e4
LT
220{
221 int cpu_arch;
222
0ba8b9b2 223 if ((read_cpuid_id() & 0x0008f000) == 0) {
1da177e4 224 cpu_arch = CPU_ARCH_UNKNOWN;
0ba8b9b2
RK
225 } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
226 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
227 } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
228 cpu_arch = (read_cpuid_id() >> 16) & 7;
1da177e4
LT
229 if (cpu_arch)
230 cpu_arch += CPU_ARCH_ARMv3;
0ba8b9b2 231 } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
180005c4
CM
232 unsigned int mmfr0;
233
234 /* Revised CPUID format. Read the Memory Model Feature
235 * Register 0 and check for VMSAv7 or PMSAv7 */
236 asm("mrc p15, 0, %0, c0, c1, 4"
237 : "=r" (mmfr0));
315cfe78
CM
238 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
239 (mmfr0 & 0x000000f0) >= 0x00000030)
180005c4
CM
240 cpu_arch = CPU_ARCH_ARMv7;
241 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
242 (mmfr0 & 0x000000f0) == 0x00000020)
243 cpu_arch = CPU_ARCH_ARMv6;
244 else
245 cpu_arch = CPU_ARCH_UNKNOWN;
246 } else
247 cpu_arch = CPU_ARCH_UNKNOWN;
1da177e4
LT
248
249 return cpu_arch;
250}
251
2ecccf90
DM
252int __pure cpu_architecture(void)
253{
254 BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
255
256 return __cpu_architecture;
257}
258
8925ec4c
WD
259static int cpu_has_aliasing_icache(unsigned int arch)
260{
261 int aliasing_icache;
262 unsigned int id_reg, num_sets, line_size;
263
7f94e9cc
WD
264 /* PIPT caches never alias. */
265 if (icache_is_pipt())
266 return 0;
267
8925ec4c
WD
268 /* arch specifies the register format */
269 switch (arch) {
270 case CPU_ARCH_ARMv7:
5fb31a96
LW
271 asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
272 : /* No output operands */
8925ec4c 273 : "r" (1));
5fb31a96
LW
274 isb();
275 asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
276 : "=r" (id_reg));
8925ec4c
WD
277 line_size = 4 << ((id_reg & 0x7) + 2);
278 num_sets = ((id_reg >> 13) & 0x7fff) + 1;
279 aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
280 break;
281 case CPU_ARCH_ARMv6:
282 aliasing_icache = read_cpuid_cachetype() & (1 << 11);
283 break;
284 default:
285 /* I-cache aliases will be handled by D-cache aliasing code */
286 aliasing_icache = 0;
287 }
288
289 return aliasing_icache;
290}
291
c0e95878
RK
292static void __init cacheid_init(void)
293{
294 unsigned int cachetype = read_cpuid_cachetype();
295 unsigned int arch = cpu_architecture();
296
b57ee99f
CM
297 if (arch >= CPU_ARCH_ARMv6) {
298 if ((cachetype & (7 << 29)) == 4 << 29) {
299 /* ARMv7 register format */
72dc53ac 300 arch = CPU_ARCH_ARMv7;
b57ee99f 301 cacheid = CACHEID_VIPT_NONALIASING;
7f94e9cc
WD
302 switch (cachetype & (3 << 14)) {
303 case (1 << 14):
b57ee99f 304 cacheid |= CACHEID_ASID_TAGGED;
7f94e9cc
WD
305 break;
306 case (3 << 14):
307 cacheid |= CACHEID_PIPT;
308 break;
309 }
8925ec4c 310 } else {
72dc53ac
WD
311 arch = CPU_ARCH_ARMv6;
312 if (cachetype & (1 << 23))
313 cacheid = CACHEID_VIPT_ALIASING;
314 else
315 cacheid = CACHEID_VIPT_NONALIASING;
8925ec4c 316 }
72dc53ac
WD
317 if (cpu_has_aliasing_icache(arch))
318 cacheid |= CACHEID_VIPT_I_ALIASING;
c0e95878
RK
319 } else {
320 cacheid = CACHEID_VIVT;
321 }
2b4ae1f1
RK
322
323 printk("CPU: %s data cache, %s instruction cache\n",
324 cache_is_vivt() ? "VIVT" :
325 cache_is_vipt_aliasing() ? "VIPT aliasing" :
7f94e9cc 326 cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
2b4ae1f1
RK
327 cache_is_vivt() ? "VIVT" :
328 icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
8925ec4c 329 icache_is_vipt_aliasing() ? "VIPT aliasing" :
7f94e9cc 330 icache_is_pipt() ? "PIPT" :
2b4ae1f1 331 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
c0e95878
RK
332}
333
1da177e4
LT
/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

/*
 * printf()-style output usable before the console is registered.
 * The formatted text (truncated to 256 bytes) goes to the low-level
 * debug UART when CONFIG_DEBUG_LL is enabled, and always to printk().
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
355
f159f4ed
TL
356static void __init feat_v6_fixup(void)
357{
358 int id = read_cpuid_id();
359
360 if ((id & 0xff0f0000) != 0x41070000)
361 return;
362
363 /*
364 * HWCAP_TLS is available only on 1136 r1p0 and later,
365 * see also kuser_get_tls_init.
366 */
367 if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
368 elf_hwcap &= ~HWCAP_TLS;
369}
370
ccea7a19
RK
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/* Per-processor backend initialisation (proc-*.S). */
	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC "r"
#else
#define PLC "I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * Switch into IRQ, ABT and UND mode in turn, pointing each mode's
	 * banked sp at the matching slot of this CPU's 'stacks' entry,
	 * then drop back to SVC mode.  IRQ and FIQ stay masked throughout
	 * (PSR_F_BIT | PSR_I_BIT in every mode value).
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
423
eb50439b
WD
424int __cpu_logical_map[NR_CPUS];
425
426void __init smp_setup_processor_id(void)
427{
428 int i;
cb8cf4f8
LP
429 u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
430 u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
eb50439b
WD
431
432 cpu_logical_map(0) = cpu;
cb8cf4f8 433 for (i = 1; i < nr_cpu_ids; ++i)
eb50439b
WD
434 cpu_logical_map(i) = i == cpu ? 0 : i;
435
cb8cf4f8 436 printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
eb50439b
WD
437}
438
b69874e4
RK
/*
 * Identify the boot CPU from its ID register, install the matching
 * processor/TLB/user/cache method tables, advertise CPU capabilities
 * to userspace and initialise per-CPU state.  Called once from
 * setup_arch() before any other CPU-dependent setup.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types. The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	/* Copy the matched per-CPU method tables into the globals. */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* Fill in the uname machine string and ELF platform string. */
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	/* Thumb support not built in: don't advertise it to userspace. */
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
489
93c02ab4 490void __init dump_machine_table(void)
1da177e4 491{
dce72dd0 492 struct machine_desc *p;
1da177e4 493
6291319d
GL
494 early_print("Available machine support:\n\nID (hex)\tNAME\n");
495 for_each_machine_desc(p)
dce72dd0 496 early_print("%08x\t%s\n", p->nr, p->name);
1da177e4 497
dce72dd0 498 early_print("\nPlease check your kernel config and/or bootloader.\n");
1da177e4 499
dce72dd0
NP
500 while (true)
501 /* can't use cpu_relax() here as it may require MMU setup */;
1da177e4
LT
502}
503
/*
 * Register a block of physical memory as a meminfo bank.  The start is
 * rounded up and the size rounded down to page boundaries.  Returns 0
 * on success, -EINVAL if the bank table is full or the region is empty
 * after alignment.
 */
int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_LPAE
	if (bank->start + size < bank->start) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - bank->start;
	}
#endif

	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size after
	 * the page alignment above.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
546
1da177e4
LT
547/*
548 * Pick out the memory size. We look for mem=size@start,
549 * where start and size are "size[KkMm]"
550 */
2b0d8c25 551static int __init early_mem(char *p)
1da177e4
LT
552{
553 static int usermem __initdata = 0;
a5d5f7da 554 phys_addr_t size;
f60892d3 555 phys_addr_t start;
2b0d8c25 556 char *endp;
1da177e4
LT
557
558 /*
559 * If the user specifies memory size, we
560 * blow away any automatically generated
561 * size.
562 */
563 if (usermem == 0) {
564 usermem = 1;
565 meminfo.nr_banks = 0;
566 }
567
568 start = PHYS_OFFSET;
2b0d8c25
JK
569 size = memparse(p, &endp);
570 if (*endp == '@')
571 start = memparse(endp + 1, NULL);
1da177e4 572
1c97b73e 573 arm_add_memory(start, size);
1da177e4 574
2b0d8c25 575 return 0;
1da177e4 576}
2b0d8c25 577early_param("mem", early_mem);
1da177e4 578
/*
 * Register the kernel image, system RAM and legacy I/O regions with the
 * resource tree so they show up in /proc/iomem and /proc/ioports.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	/* Physical extents of the kernel text and data in the image. */
	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	/* One "System RAM" resource per memblock region, with the kernel
	 * code/data resources nested inside the region containing them. */
	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
623
1da177e4
LT
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default console geometry handed to the VT layer: 80x30 VGA text mode. */
struct screen_info screen_info = {
	.orig_video_lines = 30,
	.orig_video_cols = 80,
	.orig_video_mode = 0,
	.orig_video_ega_bx = 0,
	.orig_video_isVGA = 1,
	.orig_video_points = 8
};
#endif
1da177e4 634
1da177e4
LT
635static int __init customize_machine(void)
636{
637 /* customizes platform devices, or adds new ones */
8ff1443c
RK
638 if (machine_desc->init_machine)
639 machine_desc->init_machine();
1da177e4
LT
640 return 0;
641}
642arch_initcall(customize_machine);
643
90de4137
SG
644static int __init init_machine_late(void)
645{
646 if (machine_desc->init_late)
647 machine_desc->init_late();
648 return 0;
649}
650late_initcall(init_machine_late);
651
3c57fb43
MW
#ifdef CONFIG_KEXEC
/* Total low-memory size in bytes, from the min/max pfn range. */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	/* Use %lu: the arguments are explicitly cast to unsigned long. */
	printk(KERN_INFO "Reserving %luMB of memory at %luMB "
	       "for crashkernel (System RAM: %luMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
700
27a3f0e9
NP
701static int __init meminfo_cmp(const void *_a, const void *_b)
702{
703 const struct membank *a = _a, *b = _b;
704 long cmp = bank_pfn_start(a) - bank_pfn_start(b);
705 return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
706}
6291319d 707
4588c34d
DM
/*
 * Report which privilege mode (HYP or SVC) the CPUs entered the kernel
 * in.  An inconsistent mode across CPUs usually points at a broken
 * bootloader or firmware.  No-op unless CONFIG_ARM_VIRT_EXT is set.
 */
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}
722
6291319d
GL
/*
 * Top-level ARM boot-time setup: identify the CPU, select the machine
 * descriptor (device tree first, ATAGs as fallback), parse the command
 * line, initialise memory/paging and run the platform's early hooks.
 * The ordering of the calls below is significant.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	/* Prefer a flattened device tree; fall back to ATAG-based boot. */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	/* Record the kernel image layout in init_mm. */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* Sort memory banks by start address before sanity-checking them. */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
#ifdef CONFIG_SMP
	if (is_smp()) {
		smp_set_ops(mdesc->smp);
		smp_init_cpus();
	}
#endif

	/*
	 * NOTE(review): on !SMP the check runs here; presumably the SMP
	 * path performs it after secondary CPUs boot — confirm in smp.c.
	 */
	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	/* Finally, give the platform its earliest init hook. */
	if (mdesc->init_early)
		mdesc->init_early();
}
792
793
794static int __init topology_init(void)
795{
796 int cpu;
797
66fb8bd2
RK
798 for_each_possible_cpu(cpu) {
799 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
800 cpuinfo->cpu.hotpluggable = 1;
801 register_cpu(&cpuinfo->cpu, cpu);
802 }
1da177e4
LT
803
804 return 0;
805}
1da177e4
LT
806subsys_initcall(topology_init);
807
e119bfff
RK
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory used for per-CPU proc entries. */
static int __init proc_cpu_init(void)
{
	return proc_mkdir("cpu", NULL) ? 0 : -ENOMEM;
}
fs_initcall(proc_cpu_init);
#endif
820
1da177e4
LT
/*
 * User-visible names for the HWCAP_* capability bits, in bit order
 * (c_show() tests bit j for entry j).  NULL-terminated.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
843
1da177e4
LT
/*
 * Generate the contents of /proc/cpuinfo: one stanza per online CPU
 * followed by the Hardware, Revision and Serial lines.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		/* On SMP use the ID sampled per CPU; on UP read it directly. */
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		/* Variant/part field layout depends on the CPU ID era. */
		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
906
907static void *c_start(struct seq_file *m, loff_t *pos)
908{
909 return *pos < 1 ? (void *)1 : NULL;
910}
911
912static void *c_next(struct seq_file *m, void *v, loff_t *pos)
913{
914 ++*pos;
915 return NULL;
916}
917
/* seq_file teardown: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
921
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = c_show
};
This page took 0.829832 seconds and 5 git commands to generate.