[ARM] Don't call dump_cpu_info unless we're booting
arch/arm/kernel/setup.c
1/*
2 * linux/arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/config.h>
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/stddef.h>
14#include <linux/ioport.h>
15#include <linux/delay.h>
16#include <linux/utsname.h>
17#include <linux/initrd.h>
18#include <linux/console.h>
19#include <linux/bootmem.h>
20#include <linux/seq_file.h>
21#include <linux/tty.h>
22#include <linux/init.h>
23#include <linux/root_dev.h>
24#include <linux/cpu.h>
25#include <linux/interrupt.h>
26
27#include <asm/cpu.h>
28#include <asm/elf.h>
29#include <asm/hardware.h>
30#include <asm/io.h>
31#include <asm/procinfo.h>
32#include <asm/setup.h>
33#include <asm/mach-types.h>
34#include <asm/cacheflush.h>
35#include <asm/tlbflush.h>
36
37#include <asm/mach/arch.h>
38#include <asm/mach/irq.h>
39#include <asm/mach/time.h>
40
41#ifndef MEM_SIZE
42#define MEM_SIZE (16*1024*1024)
43#endif
44
45#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
46char fpe_type[8];
47
48static int __init fpe_setup(char *line)
49{
50 memcpy(fpe_type, line, 8);
51 return 1;
52}
53
54__setup("fpe=", fpe_setup);
55#endif
56
57extern unsigned int mem_fclk_21285;
58extern void paging_init(struct meminfo *, struct machine_desc *desc);
59extern void convert_to_tag_list(struct tag *tags);
60extern void squash_mem_tags(struct tag *tag);
61extern void reboot_setup(char *str);
62extern int root_mountflags;
63extern void _stext, _text, _etext, __data_start, _edata, _end;
64
65unsigned int processor_id;
66unsigned int __machine_arch_type;
67EXPORT_SYMBOL(__machine_arch_type);
68
69unsigned int system_rev;
70EXPORT_SYMBOL(system_rev);
71
72unsigned int system_serial_low;
73EXPORT_SYMBOL(system_serial_low);
74
75unsigned int system_serial_high;
76EXPORT_SYMBOL(system_serial_high);
77
78unsigned int elf_hwcap;
79EXPORT_SYMBOL(elf_hwcap);
80
81
82#ifdef MULTI_CPU
83struct processor processor;
84#endif
85#ifdef MULTI_TLB
86struct cpu_tlb_fns cpu_tlb;
87#endif
88#ifdef MULTI_USER
89struct cpu_user_fns cpu_user;
90#endif
91#ifdef MULTI_CACHE
92struct cpu_cache_fns cpu_cache;
93#endif
94
95struct stack {
96 u32 irq[3];
97 u32 abt[3];
98 u32 und[3];
99} ____cacheline_aligned;
100
101static struct stack stacks[NR_CPUS];
102
103char elf_platform[ELF_PLATFORM_SIZE];
104EXPORT_SYMBOL(elf_platform);
105
106unsigned long phys_initrd_start __initdata = 0;
107unsigned long phys_initrd_size __initdata = 0;
108
109static struct meminfo meminfo __initdata = { 0, };
110static const char *cpu_name;
111static const char *machine_name;
112static char command_line[COMMAND_LINE_SIZE];
113
114static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
115static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
116#define ENDIANNESS ((char)endian_test.l)
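The endian_test union above is a byte-order probe: casting .l to char keeps only the least significant byte, which is c[0] ('l') on a little-endian CPU and c[3] ('b') on a big-endian one, and setup_processor() appends that character to the machine and ELF platform names. A standalone user-space sketch of the same trick (not part of setup.c; a 32-bit type is used so it behaves identically on 64-bit hosts):

#include <stdio.h>

static union { char c[4]; unsigned int l; } endian_test = { { 'l', '?', '?', 'b' } };

int main(void)
{
	/* The cast keeps only the least significant byte of .l:
	 * 'l' on a little-endian machine, 'b' on a big-endian one. */
	printf("%c\n", (char)endian_test.l);
	return 0;
}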
117
118DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
119
120/*
121 * Standard memory resources
122 */
123static struct resource mem_res[] = {
124 { "Video RAM", 0, 0, IORESOURCE_MEM },
125 { "Kernel text", 0, 0, IORESOURCE_MEM },
126 { "Kernel data", 0, 0, IORESOURCE_MEM }
127};
128
129#define video_ram mem_res[0]
130#define kernel_code mem_res[1]
131#define kernel_data mem_res[2]
132
133static struct resource io_res[] = {
134 { "reserved", 0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY },
135 { "reserved", 0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY },
136 { "reserved", 0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY }
137};
138
139#define lp0 io_res[0]
140#define lp1 io_res[1]
141#define lp2 io_res[2]
142
143static const char *cache_types[16] = {
144 "write-through",
145 "write-back",
146 "write-back",
147 "undefined 3",
148 "undefined 4",
149 "undefined 5",
150 "write-back",
151 "write-back",
152 "undefined 8",
153 "undefined 9",
154 "undefined 10",
155 "undefined 11",
156 "undefined 12",
157 "undefined 13",
158 "write-back",
159 "undefined 15",
160};
161
162static const char *cache_clean[16] = {
163 "not required",
164 "read-block",
165 "cp15 c7 ops",
166 "undefined 3",
167 "undefined 4",
168 "undefined 5",
169 "cp15 c7 ops",
170 "cp15 c7 ops",
171 "undefined 8",
172 "undefined 9",
173 "undefined 10",
174 "undefined 11",
175 "undefined 12",
176 "undefined 13",
177 "cp15 c7 ops",
178 "undefined 15",
179};
180
181static const char *cache_lockdown[16] = {
182 "not supported",
183 "not supported",
184 "not supported",
185 "undefined 3",
186 "undefined 4",
187 "undefined 5",
188 "format A",
189 "format B",
190 "undefined 8",
191 "undefined 9",
192 "undefined 10",
193 "undefined 11",
194 "undefined 12",
195 "undefined 13",
196 "format C",
197 "undefined 15",
198};
199
200static const char *proc_arch[] = {
201 "undefined/unknown",
202 "3",
203 "4",
204 "4T",
205 "5",
206 "5T",
207 "5TE",
208 "5TEJ",
209 "6TEJ",
210 "?(10)",
211 "?(11)",
212 "?(12)",
213 "?(13)",
214 "?(14)",
215 "?(15)",
216 "?(16)",
217 "?(17)",
218};
219
220#define CACHE_TYPE(x) (((x) >> 25) & 15)
221#define CACHE_S(x) ((x) & (1 << 24))
222#define CACHE_DSIZE(x) (((x) >> 12) & 4095) /* only if S=1 */
223#define CACHE_ISIZE(x) ((x) & 4095)
224
225#define CACHE_SIZE(y) (((y) >> 6) & 7)
226#define CACHE_ASSOC(y) (((y) >> 3) & 7)
227#define CACHE_M(y) ((y) & (1 << 2))
228#define CACHE_LINE(y) ((y) & 3)
229
230static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
231{
232 unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);
233
234 printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
235 cpu, prefix,
236 mult << (8 + CACHE_SIZE(cache)),
237 (mult << CACHE_ASSOC(cache)) >> 1,
238 8 << CACHE_LINE(cache),
239 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
240 CACHE_LINE(cache)));
241}
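For reference, the geometry packed into one of the 12-bit cache fields can be decoded by hand with the same arithmetic dump_cache() uses. A standalone sketch with a purely hypothetical field value of 0x1d2 (not taken from any real CPU):

#include <stdio.h>

/* Same field extraction as the CACHE_* macros above. */
#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)

int main(void)
{
	unsigned int cache = 0x1d2;	/* hypothetical example value */
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	/* For 0x1d2 this prints: 65536 bytes, associativity 4, 32 byte lines, 512 sets. */
	printf("%d bytes, associativity %d, %d byte lines, %d sets\n",
	       mult << (8 + CACHE_SIZE(cache)),
	       (mult << CACHE_ASSOC(cache)) >> 1,
	       8 << CACHE_LINE(cache),
	       1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) - CACHE_LINE(cache)));
	return 0;
}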
242
243static void __init dump_cpu_info(int cpu)
244{
245 unsigned int info = read_cpuid(CPUID_CACHETYPE);
246
247 if (info != processor_id) {
248 printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
249 cache_types[CACHE_TYPE(info)]);
250 if (CACHE_S(info)) {
251 dump_cache("I cache", cpu, CACHE_ISIZE(info));
252 dump_cache("D cache", cpu, CACHE_DSIZE(info));
253 } else {
254 dump_cache("cache", cpu, CACHE_ISIZE(info));
255 }
256 }
257}
258
259int cpu_architecture(void)
260{
261 int cpu_arch;
262
263 if ((processor_id & 0x0000f000) == 0) {
264 cpu_arch = CPU_ARCH_UNKNOWN;
265 } else if ((processor_id & 0x0000f000) == 0x00007000) {
266 cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
267 } else {
268 cpu_arch = (processor_id >> 16) & 7;
269 if (cpu_arch)
270 cpu_arch += CPU_ARCH_ARMv3;
271 }
272
273 return cpu_arch;
274}
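cpu_architecture() treats a zero architecture nibble as unknown, uses bit 23 to tell ARMv4T from ARMv3 on ARM7-family parts, and otherwise offsets the low bits of the architecture field from CPU_ARCH_ARMv3. The other ID fields are split up the same way c_show() does further down. A standalone sketch using a hypothetical ARM926-class ID value, purely for illustration:

#include <stdio.h>

int main(void)
{
	/* Hypothetical CPU ID, laid out like the ARM main ID register:
	 * implementer | variant | architecture | part | revision. */
	unsigned int processor_id = 0x41069261;

	printf("implementer : 0x%02x\n", processor_id >> 24);		/* 0x41, ARM Ltd */
	printf("variant     : 0x%x\n", (processor_id >> 20) & 15);	/* 0x0 */
	printf("architecture: %d\n", (processor_id >> 16) & 7);	/* 6, i.e. ARMv5TEJ */
	printf("part        : 0x%03x\n", (processor_id >> 4) & 0xfff);	/* 0x926 */
	printf("revision    : %d\n", processor_id & 15);		/* 1 */
	return 0;
}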
275
276/*
277 * These functions re-use the assembly code in head.S, which
278 * already provide the required functionality.
279 */
280extern struct proc_info_list *lookup_processor_type(void);
281extern struct machine_desc *lookup_machine_type(unsigned int);
282
283static void __init setup_processor(void)
284{
285 struct proc_info_list *list;
286
287 /*
288 * locate processor in the list of supported processor
289 * types. The linker builds this table for us from the
290 * entries in arch/arm/mm/proc-*.S
291 */
292 list = lookup_processor_type();
293 if (!list) {
294 printk("CPU configuration botched (ID %08x), unable "
295 "to continue.\n", processor_id);
296 while (1);
297 }
298
299 cpu_name = list->cpu_name;
300
301#ifdef MULTI_CPU
302 processor = *list->proc;
303#endif
304#ifdef MULTI_TLB
305 cpu_tlb = *list->tlb;
306#endif
307#ifdef MULTI_USER
308 cpu_user = *list->user;
309#endif
310#ifdef MULTI_CACHE
311 cpu_cache = *list->cache;
312#endif
313
314 printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
315 cpu_name, processor_id, (int)processor_id & 15,
316 proc_arch[cpu_architecture()]);
317
318 sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
319 sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
320 elf_hwcap = list->elf_hwcap;
321
322 cpu_proc_init();
323}
324
325/*
326 * cpu_init - initialise one CPU.
327 *
328 * cpu_init dumps the cache information, initialises SMP specific
329 * information, and sets up the per-CPU stacks.
330 */
331void cpu_init(void)
332{
333 unsigned int cpu = smp_processor_id();
334 struct stack *stk = &stacks[cpu];
335
336 if (cpu >= NR_CPUS) {
337 printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
338 BUG();
339 }
340
341 if (system_state == SYSTEM_BOOTING)
342 dump_cpu_info(cpu);
343
344 /*
345 * setup stacks for re-entrant exception handlers
346 */
347 __asm__ (
348 "msr cpsr_c, %1\n\t"
349 "add sp, %0, %2\n\t"
350 "msr cpsr_c, %3\n\t"
351 "add sp, %0, %4\n\t"
352 "msr cpsr_c, %5\n\t"
353 "add sp, %0, %6\n\t"
354 "msr cpsr_c, %7"
355 :
356 : "r" (stk),
357 "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
358 "I" (offsetof(struct stack, irq[0])),
359 "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
360 "I" (offsetof(struct stack, abt[0])),
361 "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
362 "I" (offsetof(struct stack, und[0])),
363 "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
364 : "r14");
365}
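The inline assembly above switches, with IRQs and FIQs masked, into IRQ, ABT and UND mode in turn, points each mode's banked sp at that mode's slot in this CPU's stacks[] entry, and finally drops back to SVC_MODE. The sp values are simply the stack base plus offsetof() of each array, which a standalone sketch makes explicit:

#include <stdio.h>
#include <stddef.h>

/* Mirror of the per-mode exception stack layout declared near the top of
 * this file (u32 replaced by unsigned int for a plain user-space build). */
struct stack {
	unsigned int irq[3];
	unsigned int abt[3];
	unsigned int und[3];
};

int main(void)
{
	/* With 4-byte entries these print 0, 12 and 24: the constants the
	 * "add sp, %0, %N" instructions above add to the stack base. */
	printf("irq sp offset: %zu\n", offsetof(struct stack, irq[0]));
	printf("abt sp offset: %zu\n", offsetof(struct stack, abt[0]));
	printf("und sp offset: %zu\n", offsetof(struct stack, und[0]));
	return 0;
}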
366
367static struct machine_desc * __init setup_machine(unsigned int nr)
368{
369 struct machine_desc *list;
370
371 /*
372 * locate machine in the list of supported machines.
373 */
374 list = lookup_machine_type(nr);
375 if (!list) {
376 printk("Machine configuration botched (nr %d), unable "
377 "to continue.\n", nr);
378 while (1);
379 }
380
381 printk("Machine: %s\n", list->name);
382
383 return list;
384}
385
386static void __init early_initrd(char **p)
387{
388 unsigned long start, size;
389
390 start = memparse(*p, p);
391 if (**p == ',') {
392 size = memparse((*p) + 1, p);
393
394 phys_initrd_start = start;
395 phys_initrd_size = size;
396 }
397}
398__early_param("initrd=", early_initrd);
399
400static void __init add_memory(unsigned long start, unsigned long size)
401{
402 /*
403 * Ensure that start/size are aligned to a page boundary.
404 * Size is appropriately rounded down, start is rounded up.
405 */
406 size -= start & ~PAGE_MASK;
407
408 meminfo.bank[meminfo.nr_banks].start = PAGE_ALIGN(start);
409 meminfo.bank[meminfo.nr_banks].size = size & PAGE_MASK;
410 meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(start);
411 meminfo.nr_banks += 1;
412}
413
414/*
415 * Pick out the memory size. We look for mem=size@start,
416 * where start and size are "size[KkMm]"
417 */
418static void __init early_mem(char **p)
419{
420 static int usermem __initdata = 0;
421 unsigned long size, start;
422
423 /*
424 * If the user specifies memory size, we
425 * blow away any automatically generated
426 * size.
427 */
428 if (usermem == 0) {
429 usermem = 1;
430 meminfo.nr_banks = 0;
431 }
432
433 start = PHYS_OFFSET;
434 size = memparse(*p, p);
435 if (**p == '@')
436 start = memparse(*p + 1, p);
437
438 add_memory(start, size);
439}
440__early_param("mem=", early_mem);
441
442/*
443 * Initial parsing of the command line.
444 */
445static void __init parse_cmdline(char **cmdline_p, char *from)
446{
447 char c = ' ', *to = command_line;
448 int len = 0;
449
450 for (;;) {
451 if (c == ' ') {
452 extern struct early_params __early_begin, __early_end;
453 struct early_params *p;
454
455 for (p = &__early_begin; p < &__early_end; p++) {
456 int len = strlen(p->arg);
457
458 if (memcmp(from, p->arg, len) == 0) {
459 if (to != command_line)
460 to -= 1;
461 from += len;
462 p->fn(&from);
463
464 while (*from != ' ' && *from != '\0')
465 from++;
466 break;
467 }
468 }
469 }
470 c = *from++;
471 if (!c)
472 break;
473 if (COMMAND_LINE_SIZE <= ++len)
474 break;
475 *to++ = c;
476 }
477 *to = '\0';
478 *cmdline_p = command_line;
479}
480
481static void __init
482setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
483{
484#ifdef CONFIG_BLK_DEV_RAM
485 extern int rd_size, rd_image_start, rd_prompt, rd_doload;
486
487 rd_image_start = image_start;
488 rd_prompt = prompt;
489 rd_doload = doload;
490
491 if (rd_sz)
492 rd_size = rd_sz;
493#endif
494}
495
496static void __init
497request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
498{
499 struct resource *res;
500 int i;
501
502 kernel_code.start = virt_to_phys(&_text);
503 kernel_code.end = virt_to_phys(&_etext - 1);
504 kernel_data.start = virt_to_phys(&__data_start);
505 kernel_data.end = virt_to_phys(&_end - 1);
506
507 for (i = 0; i < mi->nr_banks; i++) {
508 unsigned long virt_start, virt_end;
509
510 if (mi->bank[i].size == 0)
511 continue;
512
513 virt_start = __phys_to_virt(mi->bank[i].start);
514 virt_end = virt_start + mi->bank[i].size - 1;
515
516 res = alloc_bootmem_low(sizeof(*res));
517 res->name = "System RAM";
518 res->start = __virt_to_phys(virt_start);
519 res->end = __virt_to_phys(virt_end);
520 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
521
522 request_resource(&iomem_resource, res);
523
524 if (kernel_code.start >= res->start &&
525 kernel_code.end <= res->end)
526 request_resource(res, &kernel_code);
527 if (kernel_data.start >= res->start &&
528 kernel_data.end <= res->end)
529 request_resource(res, &kernel_data);
530 }
531
532 if (mdesc->video_start) {
533 video_ram.start = mdesc->video_start;
534 video_ram.end = mdesc->video_end;
535 request_resource(&iomem_resource, &video_ram);
536 }
537
538 /*
539 * Some machines don't have the possibility of ever
540 * possessing lp0, lp1 or lp2
541 */
542 if (mdesc->reserve_lp0)
543 request_resource(&ioport_resource, &lp0);
544 if (mdesc->reserve_lp1)
545 request_resource(&ioport_resource, &lp1);
546 if (mdesc->reserve_lp2)
547 request_resource(&ioport_resource, &lp2);
548}
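The net effect of request_standard_resources() is a two-level resource tree: every non-empty bank becomes a busy "System RAM" entry under iomem_resource, the kernel text and data regions are claimed as children of whichever bank contains them, and the optional video RAM and reserved parallel-port ranges are registered separately. For a single hypothetical 64MB bank at 0x10000000, the resulting /proc/iomem view would look roughly like this (all addresses invented for illustration):

10000000-13ffffff : System RAM
  10008000-101fffff : Kernel text
  10200000-102bffff : Kernel data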
549
550/*
551 * Tag parsing.
552 *
553 * This is the new way of passing data to the kernel at boot time. Rather
554 * than passing a fixed inflexible structure to the kernel, we pass a list
555 * of variable-sized tags to the kernel. The first tag must be a ATAG_CORE
556 * tag for the list to be recognised (to distinguish the tagged list from
557 * a param_struct). The list is terminated with a zero-length tag (this tag
558 * is not parsed in any way).
559 */
560static int __init parse_tag_core(const struct tag *tag)
561{
562 if (tag->hdr.size > 2) {
563 if ((tag->u.core.flags & 1) == 0)
564 root_mountflags &= ~MS_RDONLY;
565 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
566 }
567 return 0;
568}
569
570__tagtable(ATAG_CORE, parse_tag_core);
571
572static int __init parse_tag_mem32(const struct tag *tag)
573{
574 if (meminfo.nr_banks >= NR_BANKS) {
575 printk(KERN_WARNING
576 "Ignoring memory bank 0x%08x size %dKB\n",
577 tag->u.mem.start, tag->u.mem.size / 1024);
578 return -EINVAL;
579 }
580 add_memory(tag->u.mem.start, tag->u.mem.size);
581 return 0;
582}
583
584__tagtable(ATAG_MEM, parse_tag_mem32);
585
586#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
587struct screen_info screen_info = {
588 .orig_video_lines = 30,
589 .orig_video_cols = 80,
590 .orig_video_mode = 0,
591 .orig_video_ega_bx = 0,
592 .orig_video_isVGA = 1,
593 .orig_video_points = 8
594};
595
596static int __init parse_tag_videotext(const struct tag *tag)
597{
598 screen_info.orig_x = tag->u.videotext.x;
599 screen_info.orig_y = tag->u.videotext.y;
600 screen_info.orig_video_page = tag->u.videotext.video_page;
601 screen_info.orig_video_mode = tag->u.videotext.video_mode;
602 screen_info.orig_video_cols = tag->u.videotext.video_cols;
603 screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
604 screen_info.orig_video_lines = tag->u.videotext.video_lines;
605 screen_info.orig_video_isVGA = tag->u.videotext.video_isvga;
606 screen_info.orig_video_points = tag->u.videotext.video_points;
607 return 0;
608}
609
610__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
611#endif
612
613static int __init parse_tag_ramdisk(const struct tag *tag)
614{
615 setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
616 (tag->u.ramdisk.flags & 2) == 0,
617 tag->u.ramdisk.start, tag->u.ramdisk.size);
618 return 0;
619}
620
621__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
622
623static int __init parse_tag_initrd(const struct tag *tag)
624{
625 printk(KERN_WARNING "ATAG_INITRD is deprecated; "
626 "please update your bootloader.\n");
627 phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
628 phys_initrd_size = tag->u.initrd.size;
629 return 0;
630}
631
632__tagtable(ATAG_INITRD, parse_tag_initrd);
633
634static int __init parse_tag_initrd2(const struct tag *tag)
635{
636 phys_initrd_start = tag->u.initrd.start;
637 phys_initrd_size = tag->u.initrd.size;
638 return 0;
639}
640
641__tagtable(ATAG_INITRD2, parse_tag_initrd2);
642
643static int __init parse_tag_serialnr(const struct tag *tag)
644{
645 system_serial_low = tag->u.serialnr.low;
646 system_serial_high = tag->u.serialnr.high;
647 return 0;
648}
649
650__tagtable(ATAG_SERIAL, parse_tag_serialnr);
651
652static int __init parse_tag_revision(const struct tag *tag)
653{
654 system_rev = tag->u.revision.rev;
655 return 0;
656}
657
658__tagtable(ATAG_REVISION, parse_tag_revision);
659
660static int __init parse_tag_cmdline(const struct tag *tag)
661{
662 strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
663 return 0;
664}
665
666__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
667
668/*
669 * Scan the tag table for this tag, and call its parse function.
670 * The tag table is built by the linker from all the __tagtable
671 * declarations.
672 */
673static int __init parse_tag(const struct tag *tag)
674{
675 extern struct tagtable __tagtable_begin, __tagtable_end;
676 struct tagtable *t;
677
678 for (t = &__tagtable_begin; t < &__tagtable_end; t++)
679 if (tag->hdr.tag == t->tag) {
680 t->parse(tag);
681 break;
682 }
683
684 return t < &__tagtable_end;
685}
686
687/*
688 * Parse all tags in the list, checking both the global and architecture
689 * specific tag tables.
690 */
691static void __init parse_tags(const struct tag *t)
692{
693 for (; t->hdr.size; t = tag_next(t))
694 if (!parse_tag(t))
695 printk(KERN_WARNING
696 "Ignoring unrecognised tag 0x%08x\n",
697 t->hdr.tag);
698}
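parse_tags() walks the list with tag_next(), which simply advances by hdr.size 32-bit words until it hits the zero-sized terminator described in the comment block above (the real tag definitions live in asm/setup.h). A standalone sketch of that walk over a hand-built list, using a deliberately simplified tag layout and hypothetical values:

#include <stdio.h>

/* Simplified stand-in for the ATAG header; payloads follow in-line. */
struct tag_header {
	unsigned int size;	/* in 32-bit words, including this header */
	unsigned int tag;
};

#define ATAG_CORE	0x54410001
#define ATAG_MEM	0x54410002
#define ATAG_NONE	0x00000000

static unsigned int taglist[] = {
	2, ATAG_CORE,				/* header-only core tag */
	4, ATAG_MEM, 0x04000000, 0x10000000,	/* 64MB at 0x10000000 (hypothetical) */
	0, ATAG_NONE,				/* zero-sized terminator */
};

int main(void)
{
	struct tag_header *t = (struct tag_header *)taglist;

	/* Same walk as parse_tags(): advance by hdr.size words until size == 0. */
	for (; t->size; t = (struct tag_header *)((unsigned int *)t + t->size))
		printf("tag 0x%08x, %u words\n", t->tag, t->size);
	return 0;
}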
699
700/*
701 * This holds our defaults.
702 */
703static struct init_tags {
704 struct tag_header hdr1;
705 struct tag_core core;
706 struct tag_header hdr2;
707 struct tag_mem32 mem;
708 struct tag_header hdr3;
709} init_tags __initdata = {
710 { tag_size(tag_core), ATAG_CORE },
711 { 1, PAGE_SIZE, 0xff },
712 { tag_size(tag_mem32), ATAG_MEM },
713 { MEM_SIZE, PHYS_OFFSET },
714 { 0, ATAG_NONE }
715};
716
717static void (*init_machine)(void) __initdata;
718
719static int __init customize_machine(void)
720{
721 /* customizes platform devices, or adds new ones */
722 if (init_machine)
723 init_machine();
724 return 0;
725}
726arch_initcall(customize_machine);
727
728void __init setup_arch(char **cmdline_p)
729{
730 struct tag *tags = (struct tag *)&init_tags;
731 struct machine_desc *mdesc;
732 char *from = default_command_line;
733
734 setup_processor();
735 mdesc = setup_machine(machine_arch_type);
736 machine_name = mdesc->name;
737
738 if (mdesc->soft_reboot)
739 reboot_setup("s");
740
741 if (mdesc->boot_params)
742 tags = phys_to_virt(mdesc->boot_params);
743
744 /*
745 * If we have the old style parameters, convert them to
746 * a tag list.
747 */
748 if (tags->hdr.tag != ATAG_CORE)
749 convert_to_tag_list(tags);
750 if (tags->hdr.tag != ATAG_CORE)
751 tags = (struct tag *)&init_tags;
752
753 if (mdesc->fixup)
754 mdesc->fixup(mdesc, tags, &from, &meminfo);
755
756 if (tags->hdr.tag == ATAG_CORE) {
757 if (meminfo.nr_banks != 0)
758 squash_mem_tags(tags);
759 parse_tags(tags);
760 }
761
762 init_mm.start_code = (unsigned long) &_text;
763 init_mm.end_code = (unsigned long) &_etext;
764 init_mm.end_data = (unsigned long) &_edata;
765 init_mm.brk = (unsigned long) &_end;
766
767 memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
768 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
769 parse_cmdline(cmdline_p, from);
770 paging_init(&meminfo, mdesc);
771 request_standard_resources(&meminfo, mdesc);
772
773 cpu_init();
774
775 /*
776 * Set up various architecture-specific pointers
777 */
778 init_arch_irq = mdesc->init_irq;
779 system_timer = mdesc->timer;
780 init_machine = mdesc->init_machine;
781
782#ifdef CONFIG_VT
783#if defined(CONFIG_VGA_CONSOLE)
784 conswitchp = &vga_con;
785#elif defined(CONFIG_DUMMY_CONSOLE)
786 conswitchp = &dummy_con;
787#endif
788#endif
789}
790
791
792static int __init topology_init(void)
793{
794 int cpu;
795
796 for_each_cpu(cpu)
797 register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL);
798
799 return 0;
800}
801
802subsys_initcall(topology_init);
803
804static const char *hwcap_str[] = {
805 "swp",
806 "half",
807 "thumb",
808 "26bit",
809 "fastmult",
810 "fpa",
811 "vfp",
812 "edsp",
813 "java",
814 NULL
815};
816
817static void
818c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
819{
820 unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);
821
822 seq_printf(m, "%s size\t\t: %d\n"
823 "%s assoc\t\t: %d\n"
824 "%s line length\t: %d\n"
825 "%s sets\t\t: %d\n",
826 type, mult << (8 + CACHE_SIZE(cache)),
827 type, (mult << CACHE_ASSOC(cache)) >> 1,
828 type, 8 << CACHE_LINE(cache),
829 type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
830 CACHE_LINE(cache)));
831}
832
833static int c_show(struct seq_file *m, void *v)
834{
835 int i;
836
837 seq_printf(m, "Processor\t: %s rev %d (%s)\n",
838 cpu_name, (int)processor_id & 15, elf_platform);
839
840#if defined(CONFIG_SMP)
841 for_each_online_cpu(i) {
842 seq_printf(m, "Processor\t: %d\n", i);
843 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
844 per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
845 (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
846 }
847#else /* CONFIG_SMP */
848 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
849 loops_per_jiffy / (500000/HZ),
850 (loops_per_jiffy / (5000/HZ)) % 100);
851#endif
852
853 /* dump out the processor features */
854 seq_puts(m, "Features\t: ");
855
856 for (i = 0; hwcap_str[i]; i++)
857 if (elf_hwcap & (1 << i))
858 seq_printf(m, "%s ", hwcap_str[i]);
859
860 seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
861 seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);
862
863 if ((processor_id & 0x0000f000) == 0x00000000) {
864 /* pre-ARM7 */
865 seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
866 } else {
867 if ((processor_id & 0x0000f000) == 0x00007000) {
868 /* ARM7 */
869 seq_printf(m, "CPU variant\t: 0x%02x\n",
870 (processor_id >> 16) & 127);
871 } else {
872 /* post-ARM7 */
873 seq_printf(m, "CPU variant\t: 0x%x\n",
874 (processor_id >> 20) & 15);
875 }
876 seq_printf(m, "CPU part\t: 0x%03x\n",
877 (processor_id >> 4) & 0xfff);
878 }
879 seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);
880
881 {
882 unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
883 if (cache_info != processor_id) {
884 seq_printf(m, "Cache type\t: %s\n"
885 "Cache clean\t: %s\n"
886 "Cache lockdown\t: %s\n"
887 "Cache format\t: %s\n",
888 cache_types[CACHE_TYPE(cache_info)],
889 cache_clean[CACHE_TYPE(cache_info)],
890 cache_lockdown[CACHE_TYPE(cache_info)],
891 CACHE_S(cache_info) ? "Harvard" : "Unified");
892
893 if (CACHE_S(cache_info)) {
894 c_show_cache(m, "I", CACHE_ISIZE(cache_info));
895 c_show_cache(m, "D", CACHE_DSIZE(cache_info));
896 } else {
897 c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
898 }
899 }
900 }
901
902 seq_puts(m, "\n");
903
904 seq_printf(m, "Hardware\t: %s\n", machine_name);
905 seq_printf(m, "Revision\t: %04x\n", system_rev);
906 seq_printf(m, "Serial\t\t: %08x%08x\n",
907 system_serial_high, system_serial_low);
908
909 return 0;
910}
911
912static void *c_start(struct seq_file *m, loff_t *pos)
913{
914 return *pos < 1 ? (void *)1 : NULL;
915}
916
917static void *c_next(struct seq_file *m, void *v, loff_t *pos)
918{
919 ++*pos;
920 return NULL;
921}
922
923static void c_stop(struct seq_file *m, void *v)
924{
925}
926
927struct seq_operations cpuinfo_op = {
928 .start = c_start,
929 .next = c_next,
930 .stop = c_stop,
931 .show = c_show
932};