/*
 * linux/arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif
#include "atags.h"
#include "tcm.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
EXPORT_SYMBOL(outer_cache);
#endif

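/*
 * Small per-mode stacks for the IRQ, abort and undefined-instruction
 * modes: the low-level exception entry code saves a few words of state
 * here before switching back to SVC mode.  cpu_init() below points each
 * mode's banked stack pointer at this CPU's entry.
 */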
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];

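/*
 * Runtime endianness check: on a little-endian CPU the low byte of
 * endian_test.l is the first array element ('l'), on a big-endian CPU
 * it is the last ('b').  The character is appended to the machine and
 * ELF platform names below, e.g. "armv7l".
 */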
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

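/*
 * Legacy PC-style parallel port (LPT) I/O ranges.  These are only
 * claimed when the machine descriptor sets the corresponding
 * reserve_lp* flag; see request_standard_resources() below.
 */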
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

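/*
 * Work out the CPU architecture version from the main ID register.
 * Older cores encode it directly in bits [19:16]; a value of 0xf there
 * means the revised CPUID scheme is in use, in which case the Memory
 * Model Feature Register 0 has to be consulted to tell ARMv6 from
 * ARMv7.
 */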
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
		    (mmfr0 & 0x000000f0) == 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}

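/*
 * Derive the data/instruction cache flavour (VIVT, aliasing VIPT,
 * non-aliasing VIPT, ASID-tagged) from the cache type register.  The
 * top three bits distinguish the ARMv7 register layout; on the older
 * layout bit 23 flags an aliasing VIPT data cache, and anything below
 * ARMv6 is assumed to be VIVT.
 */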
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
		} else if (cachetype & (1 << 23))
			cacheid = CACHEID_VIPT_ALIASING;
		else
			cacheid = CACHEID_VIPT_NONALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

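	/*
	 * The asm below drops into each of the IRQ, abort and undef modes
	 * in turn (with IRQs and FIQs masked), points that mode's banked
	 * stack pointer at the matching entry in this CPU's struct stack,
	 * and finally switches back to SVC mode.
	 */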
	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

static int __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at %#lx\n", start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;

	/*
	 * Check whether this memory region has non-zero size.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
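/*
 * For example, "mem=64M@0x20000000" registers a single 64MB bank at
 * physical address 0x20000000; if "@start" is omitted the bank starts
 * at PHYS_OFFSET.
 */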
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_data);
	kernel_data.end     = virt_to_phys(_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = mi->bank[i].start;
		res->end   = mi->bank[i].start + mi->bank[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 * Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

#ifndef CONFIG_CMDLINE_FORCE
static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
#endif /* CONFIG_CMDLINE_FORCE */

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

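/*
 * Example: booting with "crashkernel=64M@32M" reserves a 64MB region
 * starting at physical 32MB for the dump capture kernel.
 */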
/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */

#ifdef CONFIG_CRASH_DUMP
/*
 * elfcorehdr= specifies the location of the ELF core header stored by the
 * crashed kernel. This option is passed by the kexec loader to the capture
 * kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif /* CONFIG_CRASH_DUMP */

static void __init squash_mem_tags(struct tag *tag)
{
	for (; tag->hdr.size; tag = tag_next(tag))
		if (tag->hdr.tag == ATAG_MEM)
			tag->hdr.tag = ATAG_NONE;
}

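/*
 * setup_arch() is the architecture entry point called from start_kernel().
 * It identifies the CPU and machine, picks up the boot tags (ATAGs) and
 * kernel command line, initialises the memory map and paging, and records
 * the machine's IRQ, timer and init_machine hooks for later use.
 */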
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	unwind_init();

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	cpu_init();
	tcm_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	arch_nr_irqs = mdesc->nr_irqs;
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();
}

static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

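/*
 * Feature strings for /proc/cpuinfo.  The index of each entry must match
 * the HWCAP_* bit it describes, since c_show() below walks elf_hwcap bit
 * by bit.
 */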
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
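		/*
		 * BogoMIPS here is just loops_per_jiffy rescaled:
		 * lpj / (500000 / HZ) gives the integer part and the
		 * second term the two decimal places.
		 */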
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};