x86: support always running TSC on Intel CPUs
[deliverable/linux.git] arch/x86/kernel/cpu/addon_cpuid_features.c
/*
 * Routines to identify additional cpu features that are scattered in
 * cpuid space.
 */
#include <linux/cpu.h>

#include <asm/pat.h>
#include <asm/processor.h>

#include <mach_apic.h>

struct cpuid_bit {
	u16 feature;
	u8 reg;
	u8 bit;
	u32 level;
};

enum cpuid_regs {
	CR_EAX = 0,
	CR_ECX,
	CR_EDX,
	CR_EBX
};

void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
{
	u32 max_level;
	u32 regs[4];
	const struct cpuid_bit *cb;

	static const struct cpuid_bit cpuid_bits[] = {
		{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
		{ 0, 0, 0, 0 }
	};
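	/*
	 * Each table entry gives the synthetic feature flag, the cpuid
	 * output register, the bit within that register, and the cpuid
	 * level to query.  X86_FEATURE_IDA, for instance, lives in
	 * leaf 0x6, EAX bit 1 (Intel Dynamic Acceleration).
	 */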

	for (cb = cpuid_bits; cb->feature; cb++) {

		/* Verify that the level is valid */
		max_level = cpuid_eax(cb->level & 0xffff0000);
		if (max_level < cb->level ||
		    max_level > (cb->level | 0xffff))
			continue;
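		/*
		 * The masked query above asks the hardware for the
		 * highest implemented leaf in this range (0x0000xxxx
		 * standard or 0x8000xxxx extended); the entry is usable
		 * only if its level does not exceed that maximum, e.g.
		 * leaf 0x00000006 needs max_level >= 0x6 in the
		 * standard range.
		 */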

		cpuid(cb->level, &regs[CR_EAX], &regs[CR_EBX],
		      &regs[CR_ECX], &regs[CR_EDX]);

		if (regs[cb->reg] & (1 << cb->bit))
			set_cpu_cap(c, cb->feature);
	}
}

/* leaf 0xb SMT level */
#define SMT_LEVEL	0

/* leaf 0xb sub-leaf types */
#define INVALID_TYPE	0
#define SMT_TYPE	1
#define CORE_TYPE	2

#define LEAFB_SUBTYPE(ecx)		(((ecx) >> 8) & 0xff)
#define BITS_SHIFT_NEXT_LEVEL(eax)	((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx)		((ebx) & 0xffff)
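/*
 * In each 0xb sub-leaf, EAX[4:0] is the number of x2apic id bits to
 * shift out to reach the next topology level, EBX[15:0] the number of
 * logical processors at this level, ECX[15:8] the sub-leaf type and
 * EDX the x2apic id of the current cpu, as reflected in the macros
 * above.
 */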

/*
 * Check for extended topology enumeration cpuid leaf 0xb and if it
 * exists, use it for populating initial_apicid and cpu topology
 * detection.
 */
void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_SMP
	unsigned int eax, ebx, ecx, edx, sub_index;
	unsigned int ht_mask_width, core_plus_mask_width;
	unsigned int core_select_mask, core_level_siblings;

	if (c->cpuid_level < 0xb)
		return;

	cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);

	/*
	 * check if the cpuid leaf 0xb is actually implemented.
	 */
	if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
		return;

	set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);

	/*
	 * initial apic id, which also represents 32-bit extended x2apic id.
	 */
	c->initial_apicid = edx;

	/*
	 * Populate HT related information from sub-leaf level 0.
	 */
	core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
	core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);

	sub_index = 1;
	do {
		cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);

		/*
		 * Check for the Core type in the implemented sub leaves.
		 */
		if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
			core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
			core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
			break;
		}

		sub_index++;
	} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
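	/*
	 * The loop above walks sub-leaves from index 1 until it either
	 * finds the Core-type sub-leaf or hits an invalid one, which
	 * marks the end of the enumeration.
	 */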

	core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
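	/*
	 * core_select_mask isolates the core bits of the apic id: the
	 * bits above the SMT bits but below the package bits.  A worked
	 * example, assuming ht_mask_width == 1 and core_plus_mask_width
	 * == 4: ~(-1 << 4) is 0xf, and 0xf >> 1 is 0x7, i.e. three core
	 * bits.
	 */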

#ifdef CONFIG_X86_32
	c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
						& core_select_mask;
	c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
#else
	c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
	c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
#endif
	c->x86_max_cores = (core_level_siblings / smp_num_siblings);
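	/*
	 * core_level_siblings is the number of logical cpus per package
	 * at the core level; dividing by the threads per core yields
	 * cores per package.
	 */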

	printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
	       c->phys_proc_id);
	if (c->x86_max_cores > 1)
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	return;
#endif
}

#ifdef CONFIG_X86_PAT
void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
{
	if (!cpu_has_pat)
		pat_disable("PAT not supported by CPU.");

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * There is a known erratum on Pentium III and Core Solo
		 * and Core Duo CPUs.
		 * " Page with PAT set to WC while associated MTRR is UC
		 *   may consolidate to UC "
		 * Because of this erratum, it is better to stick with
		 * setting WC in MTRR rather than using PAT on these CPUs.
		 *
		 * Enable PAT WC only on P4, Core 2 or later CPUs.
		 */
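		/*
		 * In the check below, family > 0x6 covers the P4
		 * (family 0xf), and family 6 with model >= 15 starts
		 * at Core 2 ("Merom").
		 */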
		if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
			return;

		pat_disable("PAT WC disabled due to known CPU erratum.");
		return;

	case X86_VENDOR_AMD:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_TRANSMETA:
		return;
	}

	pat_disable("PAT disabled. Not yet verified on this CPU type.");
}
#endif