#include <linux/export.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/numa_64.h>
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_32
/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 *	http://www.amd.com/K6/k6docs/revgd.html
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

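/*
 * vide is an empty function used purely as an indirect-call target: the
 * K6 bug check in init_amd_k6() times a loop of calls to it, since AMD
 * sped up indirect calls in the fixed stepping (see the comment above).
 * Explanatory note inferred from that check.
 */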
extern void vide(void);
__asm__(".align 4\nvide: ret");

static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
{
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround: Remove the unneeded alias.
 */
#define CBAR		(0xfffc)	/* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
}

static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			printk(KERN_CONT
				"system stability may be impaired when more than 32 MB are used.\n");
		else
			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
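
			/*
			 * Old-style WHCR, as assembled below: bit 0 enables
			 * write allocation and the limit, in 4 MB units,
			 * goes in the bits above it. Explanatory note based
			 * on the code, not on the BKDG.
			 */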
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
			       mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
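
			/*
			 * New-style WHCR, per the expression below: the
			 * limit in 4 MB units lives in bits 31:22 and bit 16
			 * is the enable. Explanatory note based on the code,
			 * not on the BKDG.
			 */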
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
			       mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
}

static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
	/* No MP check is needed for the boot CPU (cpu_index 0); only
	   secondary CPUs, via identify_secondary_cpu(), are checked. */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		goto valid_k7;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		goto valid_k7;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	    (c->x86_model > 7))
		if (cpu_has_mp)
			goto valid_k7;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	if (!test_taint(TAINT_UNSAFE_SMP))
		add_taint(TAINT_UNSAFE_SMP);

valid_k7:
	;
}

static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
{
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15 needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			rdmsr(MSR_K7_HWCR, l, h);
			l &= ~0x00008000;
			wrmsr(MSR_K7_HWCR, l, h);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2.
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO
			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	amd_k7_smp_check(c);
}
#endif

#ifdef CONFIG_NUMA
/*
 * Used to work around broken NUMA configs. See the comment in
 * srat_detect_node().
 */
static int __cpuinit nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
#ifdef CONFIG_X86_HT
static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
{
	u32 nodes, cores_per_cu = 1;
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
		u32 eax, ebx, ecx, edx;

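		/*
		 * Decoded below from CPUID 0x8000001e: ECX[2:0] holds the
		 * node ID and ECX[10:8] the node count minus one; EBX[7:0]
		 * is the compute unit ID and EBX[9:8] the cores per compute
		 * unit minus one. Field widths as the code masks them,
		 * which may be narrower than the architectural fields.
		 */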
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		nodes = ((ecx >> 8) & 7) + 1;
		node_id = ecx & 7;

		/* get compute unit information */
		smp_num_siblings = ((ebx >> 8) & 3) + 1;
		c->compute_unit_id = ebx & 0xff;
		cores_per_cu += ((ebx >> 8) & 3);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes = ((value >> 3) & 7) + 1;
		node_id = value & 7;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes > 1) {
		u32 cores_per_node;
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cores_per_node = c->x86_max_cores / nodes;
		cus_per_node = cores_per_node / cores_per_cu;

		/* store NodeID, use llc_shared_map to store sibling info */
		per_cpu(cpu_llc_id, cpu) = node_id;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cores_per_node;
		c->compute_unit_id %= cus_per_node;
	}
}
#endif

/*
 * On an AMD dual-core setup, the lower bits of the APIC ID distinguish
 * the cores. Assumes the number of cores is a power of two.
 */
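/*
 * Worked example: with x86_coreid_bits == 2, initial APIC ID 6 (0b110)
 * gives cpu_core_id = 6 & 3 = 2 and phys_proc_id = 6 >> 2 = 1, i.e.
 * core 2 in socket 1. Illustrative note.
 */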
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
#endif
}

int amd_get_nb_id(int cpu)
{
	int id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * If core numbers are inconsistent, it's likely a multi-fabric
	 * platform, so invoke the platform-specific handler.
	 */
	if (c->phys_proc_id != node)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming a certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through the CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				printk(KERN_WARNING FW_BUG "TSC doesn't count "
					"with P0 frequency!\n");
		}
	}

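	/*
	 * For family 0x15, the code below derives an L1 cache way size
	 * from CPUID 0x80000005 (EDX[31:24] = size in KB, EDX[23:16] =
	 * associativity, per the arithmetic used) and turns it into an
	 * mmap alignment mask, presumably to avoid cache-index aliasing
	 * between the cores of a compute unit. Explanatory note; the
	 * exact cache and motivation are inferred, not stated here.
	 */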
	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid = cpuid_edx(0x80000005);
		assoc = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	= (upperbit - 1) & PAGE_MASK;
		va_align.flags	= ALIGN_VA_32 | ALIGN_VA_64;
	}
}

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 indicates that the TSC
	 * runs at a constant rate with P/T states and does not stop in
	 * deep C-states.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			sched_clock_stable = 1;
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/* check CPU config space for extended APIC ID */
	if (cpu_has_apic && c->x86 >= 0xf) {
		unsigned int val;
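		/*
		 * Register 0x68 on bus 0, device 0x18 (decimal 24),
		 * function 0 is a configuration register of the on-die
		 * northbridge; the check below treats bits 17 and 18,
		 * when both set, as extended-APIC-ID support.
		 */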
		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

#ifdef CONFIG_SMP
	unsigned long long value;

	/*
	 * Disable the TLB flush filter by setting HWCR.FFDIS on K8
	 * (bit 6 of MSR C001_0015).
	 *
	 * Erratum 63 for SH-B3 steppings
	 * Erratum 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 0xf) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

#ifdef CONFIG_X86_64
	/* On C+ stepping K8 rep microcode works well for copy/memset */
	if (c->x86 == 0xf) {
		u32 level;

		level = cpuid_eax(1);
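		/*
		 * level is the raw CPUID(1) EAX signature
		 * (family/model/stepping); the ranges tested below
		 * correspond to revision C+ parts, matching the comment
		 * above. Explanatory note.
		 */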
		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);

		/*
		 * Some BIOSes incorrectly force this feature, but only K8
		 * revision D (model = 0x14) and later actually support it.
		 * (AMD Erratum #110, docId: 25759).
		 */
		if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
			u64 val;

			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
			if (!rdmsrl_amd_safe(0xc001100d, &val)) {
				val &= ~(1ULL << 32);
				wrmsrl_amd_safe(0xc001100d, val);
			}
		}
	}

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get the APIC ID instead of the initial APIC ID from cpuid */
	c->apicid = hard_smp_processor_id();
#else

	/*
	 * FIXME: We should handle the K5 here. Set up the write
	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 * no bus pipeline)
	 */

	switch (c->x86) {
	case 4:
		init_amd_k5(c);
		break;
	case 5:
		init_amd_k6(c);
		break;
	case 6: /* An Athlon/Duron */
		init_amd_k7(c);
		break;
	}

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);
#endif
11fdd252 | 567 | |
6c62aa4a | 568 | /* Enable workaround for FXSAVE leak */ |
18bd057b | 569 | if (c->x86 >= 6) |
16282a8e | 570 | set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK); |
1da177e4 | 571 | |
11fdd252 YL |
572 | if (!c->x86_model_id[0]) { |
573 | switch (c->x86) { | |
574 | case 0xf: | |
575 | /* Should distinguish Models here, but this is only | |
576 | a fallback anyways. */ | |
577 | strcpy(c->x86_model_id, "Hammer"); | |
578 | break; | |
579 | } | |
580 | } | |
3dd9d514 | 581 | |
27c13ece | 582 | cpu_detect_cache_sizes(c); |
3dd9d514 | 583 | |
11fdd252 | 584 | /* Multi core CPU? */ |
6c62aa4a | 585 | if (c->extended_cpuid_level >= 0x80000008) { |
11fdd252 | 586 | amd_detect_cmp(c); |
6c62aa4a YL |
587 | srat_detect_node(c); |
588 | } | |
faee9a5d | 589 | |
6c62aa4a | 590 | #ifdef CONFIG_X86_32 |
11fdd252 | 591 | detect_ht(c); |
6c62aa4a | 592 | #endif |
39b3a791 | 593 | |
11fdd252 | 594 | if (c->extended_cpuid_level >= 0x80000006) { |
d9fadd7b | 595 | if (cpuid_edx(0x80000006) & 0xf000) |
67cddd94 AK |
596 | num_cache_leaves = 4; |
597 | else | |
598 | num_cache_leaves = 3; | |
599 | } | |
3556ddfa | 600 | |
12d8a961 | 601 | if (c->x86 >= 0xf) |
11fdd252 | 602 | set_cpu_cap(c, X86_FEATURE_K8); |
de421863 | 603 | |
11fdd252 YL |
604 | if (cpu_has_xmm2) { |
605 | /* MFENCE stops RDTSC speculation */ | |
16282a8e | 606 | set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); |
11fdd252 | 607 | } |

#ifdef CONFIG_X86_64
	if (c->x86 == 0x10) {
		/* do this for boot cpu */
		if (c == &boot_cpu_data)
			check_enable_amd_mmconf_dmi();

		fam10h_check_enable_mmcfg();
	}

	if (c == &boot_cpu_data && c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
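			/*
			 * The test below checks, at 2 MB (PMD) granularity,
			 * whether TSEG falls inside the currently mapped
			 * range: either below max_low_pfn_mapped, or above
			 * 4 GB but below max_pfn_mapped. Only then is the
			 * mapping split.
			 */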
			if ((tseg>>PMD_SHIFT) <
				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
				((tseg>>PMD_SHIFT) <
				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
				(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	/*
	 * On family 0x12 and above, the APIC timer keeps running in
	 * deep C-states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here
	 * because this is always needed when GART is enabled, even in a
	 * kernel which has no MCE support built in.
	 */
	if (c->x86 == 0x10) {
		/*
		 * The BIOS should disable GartTlbWlk errors itself. If it
		 * doesn't, do it here, as suggested by the BKDG.
		 *
		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
		 */
		u64 mask;
		int err;

		err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
		if (err == 0) {
			mask |= (1 << 10);
			checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
		}
	}

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
					     unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if ((c->x86 == 6)) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.c_size_cache	= amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
 * int[] in arch/x86/include/asm/processor.h.
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_400);

const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_383);

bool cpu_has_amd_erratum(const int *erratum)
{
	struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	/*
	 * If called early enough that current_cpu_data hasn't been initialized
	 * yet, fall back to boot_cpu_data.
	 */
	if (cpu->x86 == 0)
		cpu = &boot_cpu_data;

	if (cpu->x86_vendor != X86_VENDOR_AMD)
		return false;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

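		/*
		 * OSVW_ID_LENGTH holds the number of valid OSVW status
		 * bits; the status bits sit in consecutive 64-bit MSRs
		 * starting at OSVW_STATUS, hence the (osvw_id >> 6) MSR
		 * offset and (osvw_id & 0x3f) bit index used below.
		 */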
		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
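	/*
	 * Model and stepping are packed into a single value so that one
	 * range comparison covers both, mirroring how AMD_MODEL_RANGE()
	 * encodes its (model, stepping) endpoints.
	 */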
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);