x86: EFI_PAGE_SHIFT fix
arch/x86/kernel/setup_64.c
/*
 * Copyright (C) 1995 Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/efi.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/init_ohci1394_dma.h>

#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/vsyscall.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/dmi.h>
#include <asm/cacheflush.h>
#include <asm/mce.h>
#include <asm/ds.h>
#include <asm/topology.h>
#include <asm/trampoline.h>

#include <mach_apic.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define ARCH_SETUP
#endif

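/*
 * With CONFIG_PARAVIRT, ARCH_SETUP is supplied by <asm/paravirt.h>
 * (assumed here to be the pv_init_ops arch_setup hook); on bare metal
 * it expands to nothing, as defined above.
 */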
/*
 * Machine setup..
 */

struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];

struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

static struct resource data_resource = {
	.name = "Kernel data",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource code_resource = {
	.name = "Kernel code",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource bss_resource = {
	.name = "Kernel bss",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};

static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);

#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;
	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
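
/*
 * Example (hypothetical address): a kexec-loaded capture kernel might
 * boot with "elfcorehdr=0x1f000000"; memparse() also accepts size
 * suffixes, e.g. "elfcorehdr=496M".
 */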

#ifndef CONFIG_NUMA
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
				 PAGE_SIZE);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
}
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 * from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif

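/*
 * Example (hypothetical sizes): "crashkernel=128M@16M" on the command
 * line asks for 128 MB reserved at physical 16 MB; parse_crashkernel()
 * fills in crash_size/crash_base, and the code below insists on an
 * explicit base address.
 */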
#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret == 0 && crash_size) {
		if (crash_base <= 0) {
			printk(KERN_INFO "crashkernel reservation failed - "
			       "you have to specify a base address\n");
			return;
		}

		if (reserve_bootmem(crash_base, crash_size,
				    BOOTMEM_EXCLUSIVE) < 0) {
			printk(KERN_INFO "crashkernel reservation failed - "
			       "memory is in use\n");
			return;
		}

		printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
		       "for crashkernel (System RAM: %ldMB)\n",
		       (unsigned long)(crash_size >> 20),
		       (unsigned long)(crash_base >> 20),
		       (unsigned long)(total_mem >> 20));
		crashk_res.start = crash_base;
		crashk_res.end = crash_base + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_res);
	}
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif

/* Overridden in paravirt.c if CONFIG_PARAVIRT */
void __attribute__((weak)) __init memory_setup(void)
{
	machine_specific_memory_setup();
}

/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */
void __init setup_arch(char **cmdline_p)
{
	unsigned i;

	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
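	/*
	 * A 64-bit EFI loader leaves "EL64" in efi_loader_signature
	 * ("EL32" for a 32-bit one), so only a 64-bit EFI boot turns
	 * EFI support on here.
	 */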
#ifdef CONFIG_EFI
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL64", 4))
		efi_enabled = 1;
#endif

	ARCH_SETUP

	memory_setup();
	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
	bss_resource.start = virt_to_phys(&__bss_start);
	bss_resource.end = virt_to_phys(&__bss_stop)-1;

	early_identify_cpu(&boot_cpu_data);

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif

	finish_e820_parsing();

	/* after parse_early_param, so could debug it */
	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);

	early_gart_iommu_check();

	e820_register_active_regions(0, 0, -1UL);
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	/* update e820 for memory not covered by WB MTRRs */
	mtrr_bp_init();
	if (mtrr_trim_uncached_memory(end_pfn)) {
		e820_register_active_regions(0, 0, -1UL);
		end_pfn = e820_end_of_ram();
	}

	num_physpages = end_pfn;

	check_efer();

	max_pfn_mapped = init_memory_mapping(0, (max_pfn_mapped << PAGE_SHIFT));
	if (efi_enabled)
		efi_init();

	vsmp_init();

	dmi_scan_machine();

	io_delay_init();

#ifdef CONFIG_SMP
	/* setup to use the early static init tables during kernel startup */
	x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
	x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init;
#ifdef CONFIG_NUMA
	x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init;
#endif
#endif

#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

	/* Remove active ranges so rediscovery with NUMA-awareness happens */
	remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

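	/*
	 * Propagate reservations made via the early allocator (assumed
	 * to be the reserve_early() regions) into bootmem, which only
	 * now exists.
	 */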
	early_res_to_bootmem();

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif

	if (efi_enabled)
		efi_reserve_bootmem();

	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
		unsigned long end_of_mem = end_pfn << PAGE_SHIFT;

		if (ramdisk_end <= end_of_mem) {
			reserve_bootmem_generic(ramdisk_image, ramdisk_size);
			initrd_start = ramdisk_image + PAGE_OFFSET;
			initrd_end = initrd_start+ramdisk_size;
		} else {
			/* Assumes everything on node 0 */
			free_bootmem(ramdisk_image, ramdisk_size);
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       ramdisk_end, end_of_mem);
			initrd_start = 0;
		}
	}
#endif
	reserve_crashkernel();
	paging_init();
	map_vsyscall();

	early_quirks();

#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
	ioapic_init_mappings();

	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources();
	e820_mark_nosave_regions();

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

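/*
 * CPUID leaves 0x80000002..0x80000004 return the 48-byte brand string,
 * twelve bytes per leaf across the four result registers.
 */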
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}

static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
		       "D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}

#ifdef CONFIG_NUMA
static int __cpuinit nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
#endif
	bits = c->x86_coreid_bits;

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;

#ifdef CONFIG_NUMA
	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		     In that case try picking one from a nearby CPU.
		   - The APIC IDs differ from the HyperTransport node IDs
		     which the K8 northbridge parsing fills in.
		     Assume they are all increased by a constant offset,
		     but in the same order as the HT nodeids.
		     If that doesn't result in a usable node fall back to
		     the path for the previous case. */

		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

#define ENABLE_C1E_MASK		0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM		0x0ff00000
#define CPUID_XFAM_K8		0x00000000
#define CPUID_XFAM_10H		0x00100000
#define CPUID_XFAM_11H		0x00200000
#define CPUID_XMOD		0x000f0000
#define CPUID_XMOD_REV_F	0x00040000

/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
		if (lo & ENABLE_C1E_MASK)
			return 1;
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;

#ifdef CONFIG_SMP
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}
#endif

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_cpu_cap(c, 0*32+31);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
			     level >= 0x0f58))
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	level = get_model_name(c);
	if (!level) {
		switch (c->x86) {
		case 15:
			/* Should distinguish Models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	if (c->extended_cpuid_level >= 0x80000006 &&
	    (cpuid_edx(0x80000006) & 0xf000))
		num_cache_leaves = 4;
	else
		num_cache_leaves = 3;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_K8);

	/* MFENCE stops RDTSC speculation */
	set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);

	if (amd_apic_timer_broken())
		disable_apic_timer = 1;

	if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg) &&
		    (tseg >> PMD_SHIFT) < (max_pfn_mapped >> (PMD_SHIFT-PAGE_SHIFT)))
			set_memory_4k((unsigned long)__va(tseg), 1);
	}
}

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
			       "siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
			((1 << core_bits) - 1);
	}
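
	/*
	 * Worked example (hypothetical topology): 2 cores with 2 threads
	 * each report smp_num_siblings = 4, so index_msb = 2 and
	 * phys_proc_id = apicid >> 2.  Dividing by x86_max_cores (2)
	 * leaves smp_num_siblings = 2, hence index_msb = 1 and
	 * core_bits = 1, so cpu_core_id = (apicid >> 1) & 1.
	 */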
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}

#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;

	cpuid_count(4, 0, &eax, &t, &t, &t);

	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}

static void __cpuinit srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE || !node_online(node))
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}

static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

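	/*
	 * In IA32_MISC_ENABLE, bit 11 ("BTS unavailable") and bit 12
	 * ("PEBS unavailable") read as 0 when the debug store features
	 * are usable, hence the inverted tests below.
	 */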
	if (cpu_has_ds) {
		unsigned int l1, l2;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (cpu_has_bts)
		ds_init_intel(c);

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}

static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
{
	if (c->x86 == 0x6 && c->x86_model >= 0xf)
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
}

static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}

	if (c->x86 == 0x6 && c->x86_model >= 0xf) {
		c->x86_cache_alignment = c->x86_clflush_size * 2;
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	}
	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else if (!strcmp(v, "CentaurHauls"))
		c->x86_vendor = X86_VENDOR_CENTAUR;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}

/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0';	/* Unset */
	c->x86_model_id[0] = '\0';	/* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}
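
	/*
	 * Example (an assumed Core 2 signature): tfms = 0x000006fb decodes
	 * to family 6, model 0xf, stepping 0xb; family != 0xf so no
	 * extended family is added, and since family >= 0x6 the extended
	 * model bits (tfms >> 16, zero here) are folded into the model.
	 */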

	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
#ifdef CONFIG_SMP
	c->phys_proc_id = c->initial_apicid;
#endif
	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	c->extended_cpuid_level = cpuid_eax(0x80000000);
	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	clear_cpu_cap(c, X86_FEATURE_PAT);

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		early_init_amd(c);
		if (c->x86 >= 0xf && c->x86 <= 0x11)
			set_cpu_cap(c, X86_FEATURE_PAT);
		break;
	case X86_VENDOR_INTEL:
		early_init_intel(c);
		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
			set_cpu_cap(c, X86_FEATURE_PAT);
		break;
	case X86_VENDOR_CENTAUR:
		early_init_centaur(c);
		break;
	}

}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	early_identify_cpu(c);

	init_scattered_cpuid_features(c);

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_CENTAUR:
		init_centaur(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Clear all flags overridden by options */
	for (i = 0; i < NCAPINTS; i++)
		c->x86_capability[i] &= ~cleared_cpu_caps[i];

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	select_idle_routine(c);

#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif

}

void __cpuinit identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
	mtrr_ap_init();
}

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
	return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk(KERN_CONT "%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;
	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;
	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);
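
/*
 * Example (hypothetical bit number): booting with "clearcpuid=123"
 * clears capability bit 123 before the feature flags are reported,
 * via the cleared_cpu_caps handling in identify_cpu() above.
 */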