[PATCH] kdump: x86_64: add memmmap command line option
[deliverable/linux.git] / arch / x86_64 / kernel / setup.c
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/x86-64/kernel/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 *
6 * Nov 2001 Dave Jones <davej@suse.de>
7 * Forked from i386 setup code.
8 *
9 * $Id$
10 */
11
12/*
13 * This file handles the architecture-dependent parts of initialization
14 */
15
16#include <linux/errno.h>
17#include <linux/sched.h>
18#include <linux/kernel.h>
19#include <linux/mm.h>
20#include <linux/stddef.h>
21#include <linux/unistd.h>
22#include <linux/ptrace.h>
23#include <linux/slab.h>
24#include <linux/user.h>
25#include <linux/a.out.h>
26#include <linux/tty.h>
27#include <linux/ioport.h>
28#include <linux/delay.h>
29#include <linux/config.h>
30#include <linux/init.h>
31#include <linux/initrd.h>
32#include <linux/highmem.h>
33#include <linux/bootmem.h>
34#include <linux/module.h>
35#include <asm/processor.h>
36#include <linux/console.h>
37#include <linux/seq_file.h>
38#include <linux/root_dev.h>
39#include <linux/pci.h>
40#include <linux/acpi.h>
41#include <linux/kallsyms.h>
42#include <linux/edd.h>
bbfceef4 43#include <linux/mmzone.h>
5f5609df 44#include <linux/kexec.h>
95235ca2 45#include <linux/cpufreq.h>
bbfceef4 46
1da177e4
LT
47#include <asm/mtrr.h>
48#include <asm/uaccess.h>
49#include <asm/system.h>
50#include <asm/io.h>
51#include <asm/smp.h>
52#include <asm/msr.h>
53#include <asm/desc.h>
54#include <video/edid.h>
55#include <asm/e820.h>
56#include <asm/dma.h>
57#include <asm/mpspec.h>
58#include <asm/mmu_context.h>
59#include <asm/bootsetup.h>
60#include <asm/proto.h>
61#include <asm/setup.h>
62#include <asm/mach_apic.h>
63#include <asm/numa.h>
2bc0414e 64#include <asm/sections.h>
1da177e4
LT
65
66/*
67 * Machine setup..
68 */
69
/* Per-boot CPU description filled in by early_identify_cpu()/identify_cpu(). */
6c231b7b 70struct cpuinfo_x86 boot_cpu_data __read_mostly;
1da177e4
LT
71
/* CR4 feature bits the kernel enables; mirrored into each CPU's CR4. */
72unsigned long mmu_cr4_features;
73
74int acpi_disabled;
75EXPORT_SYMBOL(acpi_disabled);
888ba6c6 76#ifdef CONFIG_ACPI
1da177e4
LT
77extern int __initdata acpi_ht;
78extern acpi_interrupt_flags acpi_sci_flags;
79int __initdata acpi_force = 0;
80#endif
81
82int acpi_numa __initdata;
83
1da177e4
LT
84/* Boot loader ID as an integer, for the benefit of proc_dointvec */
85int bootloader_type;
86
/* VESA/VGA mode the boot loader left the display in. */
87unsigned long saved_video_mode;
88
89#ifdef CONFIG_SWIOTLB
90int swiotlb;
91EXPORT_SYMBOL(swiotlb);
92#endif
93
94/*
95 * Setup options
96 */
/* Raw copies of boot_params data captured in setup_arch(). */
97struct drive_info_struct { char dummy[32]; } drive_info;
98struct screen_info screen_info;
99struct sys_desc_table_struct {
100 unsigned short length;
101 unsigned char table[0];
102};
103
104struct edid_info edid_info;
/* BIOS-provided physical memory map (possibly overridden by memmap=). */
105struct e820map e820;
106
107extern int root_mountflags;
1da177e4
LT
108
/* Boot command line after parse_cmdline_early() has consumed it. */
109char command_line[COMMAND_LINE_SIZE];
110
/* Legacy PC I/O ports claimed unconditionally in setup_arch(). */
111struct resource standard_io_resources[] = {
112 { .name = "dma1", .start = 0x00, .end = 0x1f,
113 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
114 { .name = "pic1", .start = 0x20, .end = 0x21,
115 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
116 { .name = "timer0", .start = 0x40, .end = 0x43,
117 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
118 { .name = "timer1", .start = 0x50, .end = 0x53,
119 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
120 { .name = "keyboard", .start = 0x60, .end = 0x6f,
121 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
122 { .name = "dma page reg", .start = 0x80, .end = 0x8f,
123 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
124 { .name = "pic2", .start = 0xa0, .end = 0xa1,
125 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
126 { .name = "dma2", .start = 0xc0, .end = 0xdf,
127 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
128 { .name = "fpu", .start = 0xf0, .end = 0xff,
129 .flags = IORESOURCE_BUSY | IORESOURCE_IO }
130};
131
132#define STANDARD_IO_RESOURCES \
133 (sizeof standard_io_resources / sizeof standard_io_resources[0])
134
135#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
136
/* Kernel text/data spans; start/end filled in by setup_arch(). */
137struct resource data_resource = {
138 .name = "Kernel data",
139 .start = 0,
140 .end = 0,
141 .flags = IORESOURCE_RAM,
142};
143struct resource code_resource = {
144 .name = "Kernel code",
145 .start = 0,
146 .end = 0,
147 .flags = IORESOURCE_RAM,
148};
149
150#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
151
152static struct resource system_rom_resource = {
153 .name = "System ROM",
154 .start = 0xf0000,
155 .end = 0xfffff,
156 .flags = IORESOURCE_ROM,
157};
158
159static struct resource extension_rom_resource = {
160 .name = "Extension ROM",
161 .start = 0xe0000,
162 .end = 0xeffff,
163 .flags = IORESOURCE_ROM,
164};
165
/* Slots for option ROMs found by probe_roms(); first scan starts at 0xc8000. */
166static struct resource adapter_rom_resources[] = {
167 { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
168 .flags = IORESOURCE_ROM },
169 { .name = "Adapter ROM", .start = 0, .end = 0,
170 .flags = IORESOURCE_ROM },
171 { .name = "Adapter ROM", .start = 0, .end = 0,
172 .flags = IORESOURCE_ROM },
173 { .name = "Adapter ROM", .start = 0, .end = 0,
174 .flags = IORESOURCE_ROM },
175 { .name = "Adapter ROM", .start = 0, .end = 0,
176 .flags = IORESOURCE_ROM },
177 { .name = "Adapter ROM", .start = 0, .end = 0,
178 .flags = IORESOURCE_ROM }
179};
180
181#define ADAPTER_ROM_RESOURCES \
182 (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
183
184static struct resource video_rom_resource = {
185 .name = "Video ROM",
186 .start = 0xc0000,
187 .end = 0xc7fff,
188 .flags = IORESOURCE_ROM,
189};
190
191static struct resource video_ram_resource = {
192 .name = "Video RAM area",
193 .start = 0xa0000,
194 .end = 0xbffff,
195 .flags = IORESOURCE_RAM,
196};
197
/* Expansion ROM images begin with the 0xAA55 signature (little-endian). */
198#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
199
200static int __init romchecksum(unsigned char *rom, unsigned long length)
201{
202 unsigned char *p, sum = 0;
203
204 for (p = rom; p < rom + length; p++)
205 sum += *p;
206 return sum == 0;
207}
208
/*
 * Scan the legacy ISA ROM window for the video BIOS, system BIOS,
 * extension ROM and adapter option ROMs, and register each valid image
 * with the iomem resource tree so the ranges show up as reserved.
 */
209static void __init probe_roms(void)
210{
211 unsigned long start, length, upper;
212 unsigned char *rom;
213 int i;
214
215 /* video rom */
216 upper = adapter_rom_resources[0].start;
217 for (start = video_rom_resource.start; start < upper; start += 2048) {
218 rom = isa_bus_to_virt(start);
219 if (!romsignature(rom))
220 continue;
221
222 video_rom_resource.start = start;
223
224 /* 0 < length <= 0x7f * 512, historically */
225 length = rom[2] * 512;
226
227 /* if checksum okay, trust length byte */
228 if (length && romchecksum(rom, length))
229 video_rom_resource.end = start + length - 1;
230
231 request_resource(&iomem_resource, &video_rom_resource);
232 break;
233 }
234
/* Continue the adapter scan just past the video ROM, 2K-aligned. */
235 start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
236 if (start < upper)
237 start = upper;
238
239 /* system rom */
240 request_resource(&iomem_resource, &system_rom_resource);
241 upper = system_rom_resource.start;
242
243 /* check for extension rom (ignore length byte!) */
244 rom = isa_bus_to_virt(extension_rom_resource.start);
245 if (romsignature(rom)) {
246 length = extension_rom_resource.end - extension_rom_resource.start + 1;
247 if (romchecksum(rom, length)) {
248 request_resource(&iomem_resource, &extension_rom_resource);
249 upper = extension_rom_resource.start;
250 }
251 }
252
253 /* check for adapter roms on 2k boundaries */
/* i indexes adapter_rom_resources[] and only advances on a hit (below). */
254 for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
255 rom = isa_bus_to_virt(start);
256 if (!romsignature(rom))
257 continue;
258
259 /* 0 < length <= 0x7f * 512, historically */
260 length = rom[2] * 512;
261
262 /* but accept any length that fits if checksum okay */
263 if (!length || start + length > upper || !romchecksum(rom, length))
264 continue;
265
266 adapter_rom_resources[i].start = start;
267 adapter_rom_resources[i].end = start + length - 1;
268 request_resource(&iomem_resource, &adapter_rom_resources[i]);
269
/* Round down so the += 2048 above lands on the next 2K boundary. */
270 start = adapter_rom_resources[i++].end & ~2047UL;
271 }
272}
273
/*
 * Early boot command line parser.  Walks the raw COMMAND_LINE once,
 * recognising options that must take effect before the normal __setup
 * machinery runs (memory map overrides, ACPI switches, APIC options,
 * crashkernel reservation, ...), while copying the line into
 * command_line[] for later parsing.  *cmdline_p is pointed at the copy.
 */
274static __init void parse_cmdline_early (char ** cmdline_p)
275{
276 char c = ' ', *to = command_line, *from = COMMAND_LINE;
277 int len = 0;
69cda7b1 278 int userdef = 0;
1da177e4
LT
279
280 /* Save unparsed command line copy for /proc/cmdline */
281 memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
282 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
283
/* c is the previous character: options are only matched at word starts. */
284 for (;;) {
285 if (c != ' ')
286 goto next_char;
287
288#ifdef CONFIG_SMP
289 /*
290 * If the BIOS enumerates physical processors before logical,
291 * maxcpus=N at enumeration-time can be used to disable HT.
292 */
/* NOTE: this "else" attaches to the if (c != ' ') guard above. */
293 else if (!memcmp(from, "maxcpus=", 8)) {
294 extern unsigned int maxcpus;
295
296 maxcpus = simple_strtoul(from + 8, NULL, 0);
297 }
298#endif
888ba6c6 299#ifdef CONFIG_ACPI
1da177e4
LT
300 /* "acpi=off" disables both ACPI table parsing and interpreter init */
301 if (!memcmp(from, "acpi=off", 8))
302 disable_acpi();
303
304 if (!memcmp(from, "acpi=force", 10)) {
305 /* add later when we do DMI horrors: */
306 acpi_force = 1;
307 acpi_disabled = 0;
308 }
309
310 /* acpi=ht just means: do ACPI MADT parsing
311 at bootup, but don't enable the full ACPI interpreter */
312 if (!memcmp(from, "acpi=ht", 7)) {
313 if (!acpi_force)
314 disable_acpi();
315 acpi_ht = 1;
316 }
317 else if (!memcmp(from, "pci=noacpi", 10))
318 acpi_disable_pci();
319 else if (!memcmp(from, "acpi=noirq", 10))
320 acpi_noirq_set();
321
/* SCI trigger/polarity overrides: 1 and 3 encode MPS INTI flag values. */
322 else if (!memcmp(from, "acpi_sci=edge", 13))
323 acpi_sci_flags.trigger = 1;
324 else if (!memcmp(from, "acpi_sci=level", 14))
325 acpi_sci_flags.trigger = 3;
326 else if (!memcmp(from, "acpi_sci=high", 13))
327 acpi_sci_flags.polarity = 1;
328 else if (!memcmp(from, "acpi_sci=low", 12))
329 acpi_sci_flags.polarity = 3;
330
331 /* acpi=strict disables out-of-spec workarounds */
332 else if (!memcmp(from, "acpi=strict", 11)) {
333 acpi_strict = 1;
334 }
22999244
AK
335#ifdef CONFIG_X86_IO_APIC
336 else if (!memcmp(from, "acpi_skip_timer_override", 24))
337 acpi_skip_timer_override = 1;
338#endif
1da177e4
LT
339#endif
340
66759a01
CE
341 if (!memcmp(from, "disable_timer_pin_1", 19))
342 disable_timer_pin_1 = 1;
343 if (!memcmp(from, "enable_timer_pin_1", 18))
344 disable_timer_pin_1 = -1;
345
1da177e4
LT
346 if (!memcmp(from, "nolapic", 7) ||
347 !memcmp(from, "disableapic", 11))
348 disable_apic = 1;
349
350 if (!memcmp(from, "noapic", 6))
351 skip_ioapic_setup = 1;
352
/* Plain "apic" forces IO-APIC setup even when disabled by quirks. */
353 if (!memcmp(from, "apic", 4)) {
354 skip_ioapic_setup = 0;
355 ioapic_force = 1;
356 }
357
358 if (!memcmp(from, "mem=", 4))
359 parse_memopt(from+4, &from);
360
69cda7b1 361 if (!memcmp(from, "memmap=", 7)) {
362 /* exactmap option is for used defined memory */
363 if (!memcmp(from+7, "exactmap", 8)) {
364#ifdef CONFIG_CRASH_DUMP
365 /* If we are doing a crash dump, we
366 * still need to know the real mem
367 * size before original memory map is
368 * reset.
369 */
370 saved_max_pfn = e820_end_of_ram();
371#endif
/* Skip "memmap=exactmap" (7+8 chars) and discard the BIOS e820 map. */
372 from += 8+7;
373 end_pfn_map = 0;
374 e820.nr_map = 0;
375 userdef = 1;
376 }
377 else {
378 parse_memmapopt(from+7, &from);
379 userdef = 1;
380 }
381 }
382
2b97690f 383#ifdef CONFIG_NUMA
1da177e4
LT
384 if (!memcmp(from, "numa=", 5))
385 numa_setup(from+5);
386#endif
387
388#ifdef CONFIG_GART_IOMMU
389 if (!memcmp(from,"iommu=",6)) {
390 iommu_setup(from+6);
391 }
392#endif
393
394 if (!memcmp(from,"oops=panic", 10))
395 panic_on_oops = 1;
396
397 if (!memcmp(from, "noexec=", 7))
398 nonx_setup(from + 7);
399
5f5609df
EB
400#ifdef CONFIG_KEXEC
401 /* crashkernel=size@addr specifies the location to reserve for
402 * a crash kernel. By reserving this memory we guarantee
403 * that linux never set's it up as a DMA target.
404 * Useful for holding code to do something appropriate
405 * after a kernel panic.
406 */
407 else if (!memcmp(from, "crashkernel=", 12)) {
408 unsigned long size, base;
409 size = memparse(from+12, &from);
410 if (*from == '@') {
411 base = memparse(from+1, &from);
412 /* FIXME: Do I want a sanity check
413 * to validate the memory range?
414 */
/* Recorded here; the actual bootmem reservation happens in setup_arch(). */
415 crashk_res.start = base;
416 crashk_res.end = base + size - 1;
417 }
418 }
419#endif
420
1da177e4
LT
/* Copy the current character into command_line[], bounded by its size. */
421 next_char:
422 c = *(from++);
423 if (!c)
424 break;
425 if (COMMAND_LINE_SIZE <= ++len)
426 break;
427 *(to++) = c;
428 }
69cda7b1 429 if (userdef) {
430 printk(KERN_INFO "user-defined physical RAM map:\n");
431 e820_print_map("user");
432 }
1da177e4
LT
433 *to = '\0';
434 *cmdline_p = command_line;
435}
436
2b97690f 437#ifndef CONFIG_NUMA
bbfceef4
MT
/*
 * Non-NUMA bootmem setup: place the bootmem bitmap in a free e820 area,
 * register all usable e820 RAM with node 0's bootmem allocator, then
 * reserve the bitmap itself so it is not handed out.
 */
438static void __init
439contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
1da177e4 440{
bbfceef4
MT
441 unsigned long bootmap_size, bootmap;
442
bbfceef4
MT
443 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
444 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
445 if (bootmap == -1L)
446 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
447 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
448 e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
449 reserve_bootmem(bootmap, bootmap_size);
1da177e4
LT
450}
451#endif
452
453/* Use inline assembly to define this because the nops are defined
454 as inline assembly strings in the include files and we cannot
455 get them easily into strings. */
asm("\t.data\nk8nops: "
457 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
458 K8_NOP7 K8_NOP8);
459
/* k8_nops[n] points at an n-byte K8-optimal nop sequence (n = 1..8). */
460extern unsigned char k8nops[];
461static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
462 NULL,
463 k8nops,
464 k8nops + 1,
465 k8nops + 1 + 2,
466 k8nops + 1 + 2 + 3,
467 k8nops + 1 + 2 + 3 + 4,
468 k8nops + 1 + 2 + 3 + 4 + 5,
469 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
470 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
471};
472
473/* Replace instructions with better alternatives for this CPU type.
474
475 This runs before SMP is initialized to avoid SMP problems with
476 self modifying code. This implies that assymetric systems where
477 APs have less capabilities than the boot processor are not handled.
478 In this case boot with "noreplacement". */
479void apply_alternatives(void *start, void *end)
480{
481 struct alt_instr *a;
482 int diff, i, k;
/* start/end delimit an array of alt_instr records (see asm/system.h). */
483 for (a = start; (void *)a < end; a++) {
/* Skip entries whose required CPU feature bit is not set. */
484 if (!boot_cpu_has(a->cpuid))
485 continue;
486
487 BUG_ON(a->replacementlen > a->instrlen);
488 __inline_memcpy(a->instr, a->replacement, a->replacementlen);
489 diff = a->instrlen - a->replacementlen;
490
491 /* Pad the rest with nops */
/* Emit at most ASM_NOP_MAX bytes of nop per iteration until diff is 0. */
492 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
493 k = diff;
494 if (k > ASM_NOP_MAX)
495 k = ASM_NOP_MAX;
496 __inline_memcpy(a->instr + i, k8_nops[k], k);
497 }
498 }
499}
500
/* Set by the "noreplacement" boot option to skip alternatives patching. */
501static int no_replacement __initdata = 0;
502
/* Patch the kernel's alternative-instruction section for the boot CPU. */
503void __init alternative_instructions(void)
504{
505 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
506 if (no_replacement)
507 return;
508 apply_alternatives(__alt_instructions, __alt_instructions_end);
509}
510
/* Boot-option handler: "noreplacement" disables instruction patching. */
511static int __init noreplacement_setup(char *s)
512{
513 no_replacement = 1;
514 return 0;
515}
516
517__setup("noreplacement", noreplacement_setup);
518
519#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
520struct edd edd;
521#ifdef CONFIG_EDD_MODULE
522EXPORT_SYMBOL(edd);
523#endif
524/**
525 * copy_edd() - Copy the BIOS EDD information
526 * from boot_params into a safe place.
527 *
528 */
static inline void copy_edd(void)
530{
531 memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
532 memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
533 edd.mbr_signature_nr = EDD_MBR_SIG_NR;
534 edd.edd_info_nr = EDD_NR;
535}
536#else
/* EDD disabled: keep a no-op so setup_arch() can call it unconditionally. */
537static inline void copy_edd(void)
538{
539}
540#endif
541
542#define EBDA_ADDR_POINTER 0x40E
/*
 * Reserve the Extended BIOS Data Area so early allocations cannot
 * clobber it.  Does nothing when the BIOS reports no EBDA (pointer 0).
 */
543static void __init reserve_ebda_region(void)
544{
545 unsigned int addr;
546 /**
547 * there is a real-mode segmented pointer pointing to the
548 * 4K EBDA area at 0x40E
549 */
550 addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
/* Convert the real-mode segment value to a physical address. */
551 addr <<= 4;
552 if (addr)
553 reserve_bootmem_generic(addr, PAGE_SIZE);
554}
555
/*
 * x86-64 architecture-specific boot-time setup, called once from
 * start_kernel().  Captures boot_params data, parses the early command
 * line, builds the memory map and bootmem allocator, reserves special
 * regions (kernel image, EBDA, trampoline, initrd, crash kernel), sets
 * up paging and ACPI/APIC tables, and registers standard resources.
 * The ordering of these steps is load-bearing; do not reorder casually.
 */
556void __init setup_arch(char **cmdline_p)
557{
1da177e4
LT
558 unsigned long kernel_end;
559
/* Snapshot data handed over by the boot loader (asm/bootsetup.h macros). */
560 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
561 drive_info = DRIVE_INFO;
562 screen_info = SCREEN_INFO;
563 edid_info = EDID_INFO;
564 saved_video_mode = SAVED_VIDEO_MODE;
565 bootloader_type = LOADER_TYPE;
566
567#ifdef CONFIG_BLK_DEV_RAM
568 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
569 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
570 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
571#endif
572 setup_memory_region();
573 copy_edd();
574
575 if (!MOUNT_ROOT_RDONLY)
576 root_mountflags &= ~MS_RDONLY;
577 init_mm.start_code = (unsigned long) &_text;
578 init_mm.end_code = (unsigned long) &_etext;
579 init_mm.end_data = (unsigned long) &_edata;
580 init_mm.brk = (unsigned long) &_end;
581
582 code_resource.start = virt_to_phys(&_text);
583 code_resource.end = virt_to_phys(&_etext)-1;
584 data_resource.start = virt_to_phys(&_etext);
585 data_resource.end = virt_to_phys(&_edata)-1;
586
587 parse_cmdline_early(cmdline_p);
588
589 early_identify_cpu(&boot_cpu_data);
590
591 /*
592 * partially used pages are not usable - thus
593 * we are rounding upwards:
594 */
595 end_pfn = e820_end_of_ram();
596
597 check_efer();
598
599 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
600
f6c2e333
SS
601 zap_low_mappings(0);
602
888ba6c6 603#ifdef CONFIG_ACPI
1da177e4
LT
604 /*
605 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
606 * Call this early for SRAT node setup.
607 */
608 acpi_boot_table_init();
609#endif
610
611#ifdef CONFIG_ACPI_NUMA
612 /*
613 * Parse SRAT to discover nodes.
614 */
615 acpi_numa_init();
616#endif
617
2b97690f 618#ifdef CONFIG_NUMA
1da177e4
LT
619 numa_initmem_init(0, end_pfn);
620#else
bbfceef4 621 contig_initmem_init(0, end_pfn);
1da177e4
LT
622#endif
623
624 /* Reserve direct mapping */
625 reserve_bootmem_generic(table_start << PAGE_SHIFT,
626 (table_end - table_start) << PAGE_SHIFT);
627
628 /* reserve kernel */
629 kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
630 reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);
631
632 /*
633 * reserve physical page 0 - it's a special BIOS page on many boxes,
634 * enabling clean reboots, SMP operation, laptop functions.
635 */
636 reserve_bootmem_generic(0, PAGE_SIZE);
637
638 /* reserve ebda region */
639 reserve_ebda_region();
640
641#ifdef CONFIG_SMP
642 /*
643 * But first pinch a few for the stack/trampoline stuff
644 * FIXME: Don't need the extra page at 4K, but need to fix
645 * trampoline before removing it. (see the GDT stuff)
646 */
647 reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
648
649 /* Reserve SMP trampoline */
650 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
651#endif
652
653#ifdef CONFIG_ACPI_SLEEP
654 /*
655 * Reserve low memory region for sleep support.
656 */
657 acpi_reserve_bootmem();
658#endif
659#ifdef CONFIG_X86_LOCAL_APIC
660 /*
661 * Find and reserve possible boot-time SMP configuration:
662 */
663 find_smp_config();
664#endif
665#ifdef CONFIG_BLK_DEV_INITRD
666 if (LOADER_TYPE && INITRD_START) {
667 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
668 reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
669 initrd_start =
670 INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
671 initrd_end = initrd_start+INITRD_SIZE;
672 }
673 else {
674 printk(KERN_ERR "initrd extends beyond end of memory "
675 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
676 (unsigned long)(INITRD_START + INITRD_SIZE),
677 (unsigned long)(end_pfn << PAGE_SHIFT));
678 initrd_start = 0;
679 }
680 }
681#endif
5f5609df
EB
/* Reserve the crashkernel= range recorded by parse_cmdline_early(). */
682#ifdef CONFIG_KEXEC
683 if (crashk_res.start != crashk_res.end) {
684 reserve_bootmem(crashk_res.start,
685 crashk_res.end - crashk_res.start + 1);
686 }
687#endif
0d317fb7 688
1da177e4
LT
689 paging_init();
690
691 check_ioapic();
692
888ba6c6 693#ifdef CONFIG_ACPI
1da177e4
LT
694 /*
695 * Read APIC and some other early information from ACPI tables.
696 */
697 acpi_boot_init();
698#endif
699
700#ifdef CONFIG_X86_LOCAL_APIC
701 /*
702 * get boot-time SMP configuration:
703 */
704 if (smp_found_config)
705 get_smp_config();
706 init_apic_mappings();
707#endif
708
709 /*
710 * Request address space for all standard RAM and ROM resources
711 * and also for regions reported as reserved by the e820.
712 */
713 probe_roms();
714 e820_reserve_resources();
715
716 request_resource(&iomem_resource, &video_ram_resource);
717
718 {
719 unsigned i;
720 /* request I/O space for devices used on all i[345]86 PCs */
721 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
722 request_resource(&ioport_resource, &standard_io_resources[i]);
723 }
724
a1e97782 725 e820_setup_gap();
1da177e4
LT
726
727#ifdef CONFIG_GART_IOMMU
728 iommu_hole_init();
729#endif
730
731#ifdef CONFIG_VT
732#if defined(CONFIG_VGA_CONSOLE)
733 conswitchp = &vga_con;
734#elif defined(CONFIG_DUMMY_CONSOLE)
735 conswitchp = &dummy_con;
736#endif
737#endif
738}
739
e6982c67 740static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
1da177e4
LT
741{
742 unsigned int *v;
743
ebfcaa96 744 if (c->extended_cpuid_level < 0x80000004)
1da177e4
LT
745 return 0;
746
747 v = (unsigned int *) c->x86_model_id;
748 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
749 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
750 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
751 c->x86_model_id[48] = 0;
752 return 1;
753}
754
755
/*
 * Query the AMD-style extended CPUID cache leaves (0x80000005..8) and
 * record L1/L2 cache sizes, TLB size and address widths, logging the
 * cache geometry to the console.
 */
e6982c67 756static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
1da177e4
LT
757{
758 unsigned int n, dummy, eax, ebx, ecx, edx;
759
ebfcaa96 760 n = c->extended_cpuid_level;
1da177e4
LT
761
762 if (n >= 0x80000005) {
763 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
764 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
765 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
766 c->x86_cache_size=(ecx>>24)+(edx>>24);
767 /* On K8 L1 TLB is inclusive, so don't count it */
768 c->x86_tlbsize = 0;
769 }
770
771 if (n >= 0x80000006) {
772 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
/* NOTE(review): ecx is immediately re-read via cpuid_ecx — the full
 * cpuid() above looks redundant apart from fetching ebx; confirm. */
773 ecx = cpuid_ecx(0x80000006);
774 c->x86_cache_size = ecx >> 16;
775 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
776
777 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
778 c->x86_cache_size, ecx & 0xFF);
779 }
780
781 if (n >= 0x80000007)
782 cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
783 if (n >= 0x80000008) {
784 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
785 c->x86_virt_bits = (eax >> 8) & 0xff;
786 c->x86_phys_bits = eax & 0xff;
787 }
788}
789
3f098c26
AK
790#ifdef CONFIG_NUMA
791static int nearby_node(int apicid)
792{
793 int i;
794 for (i = apicid - 1; i >= 0; i--) {
795 int node = apicid_to_node[i];
796 if (node != NUMA_NO_NODE && node_online(node))
797 return node;
798 }
799 for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
800 int node = apicid_to_node[i];
801 if (node != NUMA_NO_NODE && node_online(node))
802 return node;
803 }
804 return first_node(node_online_map); /* Shouldn't happen */
805}
806#endif
807
63518644
AK
808/*
809 * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
810 * Assumes number of cores is a power of two.
811 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
813{
814#ifdef CONFIG_SMP
2942283e 815 int cpu = smp_processor_id();
b41e2939 816 unsigned bits;
3f098c26
AK
817#ifdef CONFIG_NUMA
818 int node = 0;
0b07e984 819 unsigned apicid = phys_proc_id[cpu];
3f098c26 820#endif
b41e2939
AK
821
/* bits = log2(max cores), i.e. how many APIC-id bits encode the core. */
822 bits = 0;
94605eff 823 while ((1 << bits) < c->x86_max_cores)
b41e2939
AK
824 bits++;
825
826 /* Low order bits define the core id (index of core in socket) */
827 cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
828 /* Convert the APIC ID into the socket ID */
829 phys_proc_id[cpu] >>= bits;
63518644
AK
830
831#ifdef CONFIG_NUMA
3f098c26
AK
/* Default node guess: the socket id; refine via SRAT mapping if present. */
832 node = phys_proc_id[cpu];
833 if (apicid_to_node[apicid] != NUMA_NO_NODE)
834 node = apicid_to_node[apicid];
835 if (!node_online(node)) {
836 /* Two possibilities here:
837 - The CPU is missing memory and no node was created.
838 In that case try picking one from a nearby CPU
839 - The APIC IDs differ from the HyperTransport node IDs
840 which the K8 northbridge parsing fills in.
841 Assume they are all increased by a constant offset,
842 but in the same order as the HT nodeids.
843 If that doesn't result in a usable node fall back to the
844 path for the previous case. */
845 int ht_nodeid = apicid - (phys_proc_id[0] << bits);
846 if (ht_nodeid >= 0 &&
847 apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
848 node = apicid_to_node[ht_nodeid];
849 /* Pick a nearby node */
850 if (!node_online(node))
851 node = nearby_node(apicid);
852 }
69d81fcd 853 numa_set_node(cpu, node);
3f098c26
AK
854
855 printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
94605eff 856 cpu, c->x86_max_cores, node, cpu_core_id[cpu]);
63518644 857#endif
63518644
AK
858#endif
859}
1da177e4
LT
860
/*
 * AMD-specific CPU setup: apply K8 errata workarounds, fix up feature
 * bits, fetch the model name and cache info, and detect multi-core
 * topology.  Returns the result of get_model_name() (1 if a brand
 * string was read).
 */
861static int __init init_amd(struct cpuinfo_x86 *c)
862{
863 int r;
864 int level;
1da177e4 865
bc5e8fdf
LT
866#ifdef CONFIG_SMP
867 unsigned long value;
868
7d318d77
AK
869 /*
870 * Disable TLB flush filter by setting HWCR.FFDIS on K8
871 * bit 6 of msr C001_0015
872 *
873 * Errata 63 for SH-B3 steppings
874 * Errata 122 for all steppings (F+ have it disabled by default)
875 */
876 if (c->x86 == 15) {
877 rdmsrl(MSR_K8_HWCR, value);
878 value |= 1 << 6;
879 wrmsrl(MSR_K8_HWCR, value);
880 }
bc5e8fdf
LT
881#endif
882
1da177e4
LT
883 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
884 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
885 clear_bit(0*32+31, &c->x86_capability);
886
887 /* C-stepping K8? */
888 level = cpuid_eax(1);
889 if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
890 set_bit(X86_FEATURE_K8_C, &c->x86_capability);
891
892 r = get_model_name(c);
893 if (!r) {
894 switch (c->x86) {
895 case 15:
896 /* Should distinguish Models here, but this is only
897 a fallback anyways. */
898 strcpy(c->x86_model_id, "Hammer");
899 break;
900 }
901 }
902 display_cacheinfo(c);
903
ebfcaa96 904 if (c->extended_cpuid_level >= 0x80000008) {
94605eff
SS
/* Core count from CPUID 0x80000008 ECX[7:0]; must be a power of two. */
905 c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
906 if (c->x86_max_cores & (c->x86_max_cores - 1))
907 c->x86_max_cores = 1;
1da177e4 908
63518644 909 amd_detect_cmp(c);
1da177e4
LT
910 }
911
912 return r;
913}
914
/*
 * Detect HyperThreading/SMT topology: derive the physical package id
 * and core id for this CPU from the sibling count reported by
 * CPUID leaf 1 EBX[23:16].  No-op on CMP-legacy or non-HT parts.
 */
e6982c67 915static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
1da177e4
LT
916{
917#ifdef CONFIG_SMP
918 u32 eax, ebx, ecx, edx;
94605eff 919 int index_msb, core_bits;
1da177e4 920 int cpu = smp_processor_id();
94605eff
SS
921
922 cpuid(1, &eax, &ebx, &ecx, &edx);
923
924 c->apicid = phys_pkg_id(0);
925
63518644 926 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
1da177e4
LT
927 return;
928
1da177e4 929 smp_num_siblings = (ebx & 0xff0000) >> 16;
94605eff 930
1da177e4
LT
931 if (smp_num_siblings == 1) {
932 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
94605eff
SS
933 } else if (smp_num_siblings > 1 ) {
934
1da177e4
LT
935 if (smp_num_siblings > NR_CPUS) {
936 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
937 smp_num_siblings = 1;
938 return;
939 }
94605eff
SS
940
/* High APIC-id bits above the sibling field give the package id. */
941 index_msb = get_count_order(smp_num_siblings);
1da177e4 942 phys_proc_id[cpu] = phys_pkg_id(index_msb);
94605eff 943
1da177e4
LT
944 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
945 phys_proc_id[cpu]);
3dd9d514 946
/* siblings counted per package; divide out cores to get threads/core. */
94605eff 947 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
3dd9d514 948
94605eff
SS
949 index_msb = get_count_order(smp_num_siblings) ;
950
951 core_bits = get_count_order(c->x86_max_cores);
3dd9d514 952
94605eff
SS
953 cpu_core_id[cpu] = phys_pkg_id(index_msb) &
954 ((1 << core_bits) - 1);
3dd9d514 955
94605eff 956 if (c->x86_max_cores > 1)
3dd9d514
AK
957 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
958 cpu_core_id[cpu]);
1da177e4
LT
959 }
960#endif
961}
962
3dd9d514
AK
963/*
964 * find out the number of processor cores on the die
965 */
e6982c67 966static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
3dd9d514
AK
967{
968 unsigned int eax;
969
/* Core count comes from CPUID leaf 4; absent before cpuid_level 4. */
970 if (c->cpuid_level < 4)
971 return 1;
972
/* CPUID with EAX=4, ECX=0; only EAX is kept, EBX/EDX are clobbers. */
973 __asm__("cpuid"
974 : "=a" (eax)
975 : "0" (4), "c" (0)
976 : "bx", "dx");
977
/* EAX[31:26] = max core id on this die; +1 converts to a count. */
978 if (eax & 0x1f)
979 return ((eax >> 26) + 1);
980 else
981 return 1;
982}
983
df0cc26b
AK
/*
 * Bind the current CPU to the NUMA node recorded for its APIC id by
 * SRAT parsing; defaults to node 0 when no mapping exists.
 */
984static void srat_detect_node(void)
985{
986#ifdef CONFIG_NUMA
ddea7be0 987 unsigned node;
df0cc26b
AK
988 int cpu = smp_processor_id();
989
990 /* Don't do the funky fallback heuristics the AMD version employs
991 for now. */
ddea7be0 992 node = apicid_to_node[hard_smp_processor_id()];
df0cc26b
AK
993 if (node == NUMA_NO_NODE)
994 node = 0;
69d81fcd 995 numa_set_node(cpu, node);
df0cc26b
AK
996
/* acpi_numa > 0 means SRAT data was actually used; log the binding. */
997 if (acpi_numa > 0)
998 printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node);
999#endif
1000}
1001
/*
 * Intel-specific CPU setup: cache info, address widths (with the 0F34
 * CPUID workaround), cache alignment, constant-TSC flag, core count
 * and NUMA node binding.
 */
e6982c67 1002static void __cpuinit init_intel(struct cpuinfo_x86 *c)
1da177e4
LT
1003{
1004 /* Cache sizes */
1005 unsigned n;
1006
1007 init_intel_cacheinfo(c);
ebfcaa96 1008 n = c->extended_cpuid_level;
1da177e4
LT
1009 if (n >= 0x80000008) {
1010 unsigned eax = cpuid_eax(0x80000008);
1011 c->x86_virt_bits = (eax >> 8) & 0xff;
1012 c->x86_phys_bits = eax & 0xff;
af9c142d
SL
1013 /* CPUID workaround for Intel 0F34 CPU */
1014 if (c->x86_vendor == X86_VENDOR_INTEL &&
1015 c->x86 == 0xF && c->x86_model == 0x3 &&
1016 c->x86_mask == 0x4)
1017 c->x86_phys_bits = 36;
1da177e4
LT
1018 }
1019
/* P4 (family 15): prefetch works on two cachelines at a time. */
1020 if (c->x86 == 15)
1021 c->x86_cache_alignment = c->x86_clflush_size * 2;
c29601e9
AK
1022 if (c->x86 >= 15)
1023 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
94605eff 1024 c->x86_max_cores = intel_num_cpu_cores(c);
df0cc26b
AK
1025
1026 srat_detect_node();
1da177e4
LT
1027}
1028
672289e9 1029static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
1da177e4
LT
1030{
1031 char *v = c->x86_vendor_id;
1032
1033 if (!strcmp(v, "AuthenticAMD"))
1034 c->x86_vendor = X86_VENDOR_AMD;
1035 else if (!strcmp(v, "GenuineIntel"))
1036 c->x86_vendor = X86_VENDOR_INTEL;
1037 else
1038 c->x86_vendor = X86_VENDOR_UNKNOWN;
1039}
1040
1041struct cpu_model_info {
1042 int vendor;
1043 int family;
1044 char *model_names[16];
1045};
1046
1047/* Do some early cpuid on the boot CPU to get some parameter that are
1048 needed before check_bugs. Everything advanced is in identify_cpu
1049 below. */
e6982c67 1050void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
1da177e4
LT
1051{
1052 u32 tfms;
1053
/* Reset *c to known defaults before probing. */
1054 c->loops_per_jiffy = loops_per_jiffy;
1055 c->x86_cache_size = -1;
1056 c->x86_vendor = X86_VENDOR_UNKNOWN;
1057 c->x86_model = c->x86_mask = 0; /* So far unknown... */
1058 c->x86_vendor_id[0] = '\0'; /* Unset */
1059 c->x86_model_id[0] = '\0'; /* Unset */
1060 c->x86_clflush_size = 64;
1061 c->x86_cache_alignment = c->x86_clflush_size;
94605eff 1062 c->x86_max_cores = 1;
ebfcaa96 1063 c->extended_cpuid_level = 0;
1da177e4
LT
1064 memset(&c->x86_capability, 0, sizeof c->x86_capability);
1065
1066 /* Get vendor name */
/* CPUID 0: vendor string arrives in EBX,EDX,ECX order — note indices. */
1067 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
1068 (unsigned int *)&c->x86_vendor_id[0],
1069 (unsigned int *)&c->x86_vendor_id[8],
1070 (unsigned int *)&c->x86_vendor_id[4]);
1071
1072 get_cpu_vendor(c);
1073
1074 /* Initialize the standard set of capabilities */
1075 /* Note that the vendor-specific code below might override */
1076
1077 /* Intel-defined flags: level 0x00000001 */
1078 if (c->cpuid_level >= 0x00000001) {
1079 __u32 misc;
1080 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
1081 &c->x86_capability[0]);
/* Decode family/model/stepping, folding in the extended fields. */
1082 c->x86 = (tfms >> 8) & 0xf;
1083 c->x86_model = (tfms >> 4) & 0xf;
1084 c->x86_mask = tfms & 0xf;
f5f786d0 1085 if (c->x86 == 0xf)
1da177e4 1086 c->x86 += (tfms >> 20) & 0xff;
f5f786d0 1087 if (c->x86 >= 0x6)
1da177e4 1088 c->x86_model += ((tfms >> 16) & 0xF) << 4;
1da177e4
LT
/* CLFLUSH feature bit (19): EBX[15:8] gives line size in 8-byte units. */
1089 if (c->x86_capability[0] & (1<<19))
1090 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
1da177e4
LT
1091 } else {
1092 /* Have CPUID level 0 only - unheard of */
1093 c->x86 = 4;
1094 }
a158608b
AK
1095
1096#ifdef CONFIG_SMP
b41e2939 1097 phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
a158608b 1098#endif
1da177e4
LT
1099}
1100
1101/*
1102 * This does the hard work of actually picking apart the CPU stuff...
1103 */
e6982c67 1104void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
1da177e4
LT
1105{
1106 int i;
1107 u32 xlvl;
1108
1109 early_identify_cpu(c);
1110
1111 /* AMD-defined flags: level 0x80000001 */
1112 xlvl = cpuid_eax(0x80000000);
ebfcaa96 1113 c->extended_cpuid_level = xlvl;
1da177e4
LT
1114 if ((xlvl & 0xffff0000) == 0x80000000) {
1115 if (xlvl >= 0x80000001) {
1116 c->x86_capability[1] = cpuid_edx(0x80000001);
5b7abc6f 1117 c->x86_capability[6] = cpuid_ecx(0x80000001);
1da177e4
LT
1118 }
1119 if (xlvl >= 0x80000004)
1120 get_model_name(c); /* Default name */
1121 }
1122
1123 /* Transmeta-defined flags: level 0x80860001 */
1124 xlvl = cpuid_eax(0x80860000);
1125 if ((xlvl & 0xffff0000) == 0x80860000) {
1126 /* Don't set x86_cpuid_level here for now to not confuse. */
1127 if (xlvl >= 0x80860001)
1128 c->x86_capability[2] = cpuid_edx(0x80860001);
1129 }
1130
1131 /*
1132 * Vendor-specific initialization. In this section we
1133 * canonicalize the feature flags, meaning if there are
1134 * features a certain CPU supports which CPUID doesn't
1135 * tell us, CPUID claiming incorrect flags, or other bugs,
1136 * we handle them here.
1137 *
1138 * At the end of this section, c->x86_capability better
1139 * indicate the features this CPU genuinely supports!
1140 */
1141 switch (c->x86_vendor) {
1142 case X86_VENDOR_AMD:
1143 init_amd(c);
1144 break;
1145
1146 case X86_VENDOR_INTEL:
1147 init_intel(c);
1148 break;
1149
1150 case X86_VENDOR_UNKNOWN:
1151 default:
1152 display_cacheinfo(c);
1153 break;
1154 }
1155
1156 select_idle_routine(c);
1157 detect_ht(c);
1da177e4
LT
1158
1159 /*
1160 * On SMP, boot_cpu_data holds the common feature set between
1161 * all CPUs; so make sure that we indicate which features are
1162 * common between the CPUs. The first time this routine gets
1163 * executed, c == &boot_cpu_data.
1164 */
1165 if (c != &boot_cpu_data) {
1166 /* AND the already accumulated flags with these */
1167 for (i = 0 ; i < NCAPINTS ; i++)
1168 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1169 }
1170
1171#ifdef CONFIG_X86_MCE
1172 mcheck_init(c);
1173#endif
3b520b23
SL
1174 if (c == &boot_cpu_data)
1175 mtrr_bp_init();
1176 else
1177 mtrr_ap_init();
1da177e4 1178#ifdef CONFIG_NUMA
3019e8eb 1179 numa_add_cpu(smp_processor_id());
1da177e4
LT
1180#endif
1181}
1182
1183
e6982c67 1184void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
1da177e4
LT
1185{
1186 if (c->x86_model_id[0])
1187 printk("%s", c->x86_model_id);
1188
1189 if (c->x86_mask || c->cpuid_level >= 0)
1190 printk(" stepping %02x\n", c->x86_mask);
1191 else
1192 printk("\n");
1193}
1194
1195/*
1196 * Get CPU information for use by the procfs.
1197 */
1198
1199static int show_cpuinfo(struct seq_file *m, void *v)
1200{
1201 struct cpuinfo_x86 *c = v;
1202
1203 /*
1204 * These flag bits must match the definitions in <asm/cpufeature.h>.
1205 * NULL means this bit is undefined or reserved; either way it doesn't
1206 * have meaning as far as Linux is concerned. Note that it's important
1207 * to realize there is a difference between this table and CPUID -- if
1208 * applications want to get the raw CPUID data, they should access
1209 * /dev/cpu/<cpu_nr>/cpuid instead.
1210 */
1211 static char *x86_cap_flags[] = {
1212 /* Intel-defined */
1213 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
1214 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
1215 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
1216 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
1217
1218 /* AMD-defined */
3c3b73b6 1219 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1da177e4
LT
1220 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
1221 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
1222 NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
1223
1224 /* Transmeta-defined */
1225 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
1226 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1227 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1228 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1229
1230 /* Other (Linux-defined) */
622dcaf9 1231 "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
c29601e9 1232 "constant_tsc", NULL, NULL,
1da177e4
LT
1233 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1234 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1235 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1236
1237 /* Intel-defined (#2) */
daedb82d 1238 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est",
1da177e4
LT
1239 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
1240 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1241 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1242
5b7abc6f
PA
1243 /* VIA/Cyrix/Centaur-defined */
1244 NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
1245 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1246 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1247 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1248
1da177e4
LT
1249 /* AMD-defined (#2) */
1250 "lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL,
1251 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1252 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
5b7abc6f 1253 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1da177e4
LT
1254 };
1255 static char *x86_power_flags[] = {
1256 "ts", /* temperature sensor */
1257 "fid", /* frequency id control */
1258 "vid", /* voltage id control */
1259 "ttp", /* thermal trip */
1260 "tm",
1261 "stc"
1262 };
1263
1264
1265#ifdef CONFIG_SMP
1266 if (!cpu_online(c-cpu_data))
1267 return 0;
1268#endif
1269
1270 seq_printf(m,"processor\t: %u\n"
1271 "vendor_id\t: %s\n"
1272 "cpu family\t: %d\n"
1273 "model\t\t: %d\n"
1274 "model name\t: %s\n",
1275 (unsigned)(c-cpu_data),
1276 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
1277 c->x86,
1278 (int)c->x86_model,
1279 c->x86_model_id[0] ? c->x86_model_id : "unknown");
1280
1281 if (c->x86_mask || c->cpuid_level >= 0)
1282 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
1283 else
1284 seq_printf(m, "stepping\t: unknown\n");
1285
1286 if (cpu_has(c,X86_FEATURE_TSC)) {
95235ca2
VP
1287 unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
1288 if (!freq)
1289 freq = cpu_khz;
1da177e4 1290 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
95235ca2 1291 freq / 1000, (freq % 1000));
1da177e4
LT
1292 }
1293
1294 /* Cache size */
1295 if (c->x86_cache_size >= 0)
1296 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
1297
1298#ifdef CONFIG_SMP
94605eff 1299 if (smp_num_siblings * c->x86_max_cores > 1) {
db468681
AK
1300 int cpu = c - cpu_data;
1301 seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
94605eff 1302 seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
d31ddaa1 1303 seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
94605eff 1304 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
db468681 1305 }
1da177e4
LT
1306#endif
1307
1308 seq_printf(m,
1309 "fpu\t\t: yes\n"
1310 "fpu_exception\t: yes\n"
1311 "cpuid level\t: %d\n"
1312 "wp\t\t: yes\n"
1313 "flags\t\t:",
1314 c->cpuid_level);
1315
1316 {
1317 int i;
1318 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
1319 if ( test_bit(i, &c->x86_capability) &&
1320 x86_cap_flags[i] != NULL )
1321 seq_printf(m, " %s", x86_cap_flags[i]);
1322 }
1323
1324 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
1325 c->loops_per_jiffy/(500000/HZ),
1326 (c->loops_per_jiffy/(5000/HZ)) % 100);
1327
1328 if (c->x86_tlbsize > 0)
1329 seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
1330 seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
1331 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
1332
1333 seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
1334 c->x86_phys_bits, c->x86_virt_bits);
1335
1336 seq_printf(m, "power management:");
1337 {
1338 unsigned i;
1339 for (i = 0; i < 32; i++)
1340 if (c->x86_power & (1 << i)) {
1341 if (i < ARRAY_SIZE(x86_power_flags))
1342 seq_printf(m, " %s", x86_power_flags[i]);
1343 else
1344 seq_printf(m, " [%d]", i);
1345 }
1346 }
1da177e4 1347
d31ddaa1 1348 seq_printf(m, "\n\n");
1da177e4
LT
1349
1350 return 0;
1351}
1352
1353static void *c_start(struct seq_file *m, loff_t *pos)
1354{
1355 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
1356}
1357
1358static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1359{
1360 ++*pos;
1361 return c_start(m, pos);
1362}
1363
1364static void c_stop(struct seq_file *m, void *v)
1365{
1366}
1367
1368struct seq_operations cpuinfo_op = {
1369 .start =c_start,
1370 .next = c_next,
1371 .stop = c_stop,
1372 .show = show_cpuinfo,
1373};
This page took 0.486034 seconds and 5 git commands to generate.