Commit | Line | Data |
---|---|---|
80cc9f10 PO |
1 | /* |
2 | * AMD CPU Microcode Update Driver for Linux | |
fe055896 BP |
3 | * |
4 | * This driver allows to upgrade microcode on F10h AMD | |
5 | * CPUs and later. | |
6 | * | |
597e11a3 | 7 | * Copyright (C) 2008-2011 Advanced Micro Devices Inc. |
80cc9f10 PO |
8 | * |
9 | * Author: Peter Oruba <peter.oruba@amd.com> | |
10 | * | |
11 | * Based on work by: | |
12 | * Tigran Aivazian <tigran@aivazian.fsnet.co.uk> | |
13 | * | |
fe055896 BP |
14 | * early loader: |
15 | * Copyright (C) 2013 Advanced Micro Devices, Inc. | |
16 | * | |
17 | * Author: Jacob Shin <jacob.shin@amd.com> | |
18 | * Fixes: Borislav Petkov <bp@suse.de> | |
80cc9f10 | 19 | * |
2a3282a7 | 20 | * Licensed under the terms of the GNU General Public |
80cc9f10 | 21 | * License version 2. See file COPYING for details. |
4bae1967 | 22 | */ |
6b26e1bf | 23 | #define pr_fmt(fmt) "microcode: " fmt |
f58e1f53 | 24 | |
fe055896 | 25 | #include <linux/earlycpio.h> |
4bae1967 | 26 | #include <linux/firmware.h> |
4bae1967 IM |
27 | #include <linux/uaccess.h> |
28 | #include <linux/vmalloc.h> | |
fe055896 | 29 | #include <linux/initrd.h> |
4bae1967 | 30 | #include <linux/kernel.h> |
80cc9f10 | 31 | #include <linux/pci.h> |
80cc9f10 | 32 | |
fe055896 | 33 | #include <asm/microcode_amd.h> |
80cc9f10 | 34 | #include <asm/microcode.h> |
4bae1967 | 35 | #include <asm/processor.h> |
fe055896 BP |
36 | #include <asm/setup.h> |
37 | #include <asm/cpu.h> | |
4bae1967 | 38 | #include <asm/msr.h> |
80cc9f10 | 39 | |
/* In-kernel copy of the container's CPU equivalence table (vmalloc'd). */
static struct equiv_cpu_entry *equiv_cpu_table;

/* One cached microcode patch, keyed by the equivalence ID it applies to. */
struct ucode_patch {
	struct list_head plist;
	void *data;		/* kmalloc'd microcode_amd blob */
	u32 patch_id;
	u16 equiv_cpu;
};

/* Per-family cache holding the newest known patch per equivalence ID. */
static LIST_HEAD(pcache);

/*
 * This points to the current valid container of microcode patches which we will
 * save from the initrd before jettisoning its contents.
 */
static u8 *container;
static size_t container_size;

/* Patch level reached by the early loader; 0 if no early update happened. */
static u32 ucode_new_rev;
/* Copy of the BSP's matching patch, usable before paging/vmalloc (APs, resume). */
u8 amd_ucode_patch[PATCH_MAX_SIZE];
/* Equivalence ID the BSP matched during early load. */
static u16 this_equiv_id;

/* The microcode blob as located in the initrd (or builtin firmware). */
static struct cpio_data ucode_cpio;

/*
 * Microcode patch container file is prepended to the initrd in cpio format.
 * See Documentation/x86/early-microcode.txt
 */
static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";
69 | ||
/*
 * Locate the AMD microcode container inside the initrd cpio archive.
 *
 * Returns a cpio_data whose .data is NULL when no container was found.
 */
static struct cpio_data __init find_ucode_in_initrd(void)
{
	long offset = 0;
	char *path;
	void *start;
	size_t size;

#ifdef CONFIG_X86_32
	struct boot_params *p;

	/*
	 * On 32-bit, early load occurs before paging is turned on so we need
	 * to use physical addresses.
	 */
	p = (struct boot_params *)__pa_nodebug(&boot_params);
	path = (char *)__pa_nodebug(ucode_path);
	start = (void *)p->hdr.ramdisk_image;
	size = p->hdr.ramdisk_size;
#else
	path = ucode_path;
	/* 64-bit: the initrd is reachable through the direct mapping. */
	start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
	size = boot_params.hdr.ramdisk_size;
#endif

	return find_cpio_data(path, start, size, &offset);
}
96 | ||
/*
 * Compute the byte length of one microcode container: its header and
 * equivalence table plus every UCODE_UCODE_TYPE section that follows,
 * stopping at the next container or at an oversized/unknown section.
 *
 * Returns 0 when @data does not start with a valid container header.
 */
static size_t compute_container_size(u8 *data, u32 total_size)
{
	size_t size = 0;
	u32 *header = (u32 *)data;

	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)                            /* size */
		return size;

	/* Equivalence table payload plus the container header itself. */
	size = header[2] + CONTAINER_HDR_SZ;
	total_size -= size;
	data += size;

	while (total_size) {
		u16 patch_size;

		header = (u32 *)data;

		if (header[0] != UCODE_UCODE_TYPE)
			break;

		/*
		 * Sanity-check patch size.
		 */
		patch_size = header[1];
		if (patch_size > PATCH_MAX_SIZE)
			break;

		size += patch_size + SECTION_HDR_SIZE;
		data += patch_size + SECTION_HDR_SIZE;
		total_size -= patch_size + SECTION_HDR_SIZE;
	}

	return size;
}
133 | ||
/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 */
static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
{
	struct equiv_cpu_entry *eq;
	size_t *cont_sz;
	u32 *header;
	u8 *data, **cont;
	u8 (*patch)[PATCH_MAX_SIZE];
	u16 eq_id = 0;
	int offset, left;
	u32 rev, eax, ebx, ecx, edx;
	u32 *new_rev;

#ifdef CONFIG_X86_32
	/* Paging may still be off: reference the globals by physical address. */
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	cont_sz = (size_t *)__pa_nodebug(&container_size);
	cont = (u8 **)__pa_nodebug(&container);
	patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	cont_sz = &container_size;
	cont = &container;
	patch = &amd_ucode_patch;
#endif

	data = ucode;
	left = size;
	header = (u32 *)data;

	/* find equiv cpu table */
	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)                            /* size */
		return;

	/* CPUID(1): this CPU's signature, used for the equivalence lookup. */
	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	/* Walk the (possibly concatenated) containers until one matches. */
	while (left > 0) {
		eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);

		*cont = data;

		/* Advance past the container header */
		offset = header[2] + CONTAINER_HDR_SZ;
		data += offset;
		left -= offset;

		eq_id = find_equiv_id(eq, eax);
		if (eq_id) {
			this_equiv_id = eq_id;
			*cont_sz = compute_container_size(*cont, left + offset);

			/*
			 * truncate how much we need to iterate over in the
			 * ucode update loop below
			 */
			left = *cont_sz - offset;
			break;
		}

		/*
		 * support multiple container files appended together. if this
		 * one does not have a matching equivalent cpu entry, we fast
		 * forward to the next container file.
		 */
		while (left > 0) {
			header = (u32 *)data;
			if (header[0] == UCODE_MAGIC &&
			    header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
				break;

			offset = header[1] + SECTION_HDR_SIZE;
			data += offset;
			left -= offset;
		}

		/* mark where the next microcode container file starts */
		offset = data - (u8 *)ucode;
		ucode = data;
	}

	if (!eq_id) {
		/* Nothing matched: invalidate the saved container location. */
		*cont = NULL;
		*cont_sz = 0;
		return;
	}

	/* early==true: may run before paging on 32-bit. */
	if (check_current_patch_level(&rev, true))
		return;

	/* Apply every newer matching patch section in this container. */
	while (left > 0) {
		struct microcode_amd *mc;

		header = (u32 *)data;
		if (header[0] != UCODE_UCODE_TYPE || /* type */
		    header[1] == 0)                  /* size */
			break;

		mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);

		if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {

			if (!__apply_microcode_amd(mc)) {
				rev = mc->hdr.patch_id;
				*new_rev = rev;

				/* Stash the BSP's patch for APs/resume. */
				if (save_patch)
					memcpy(patch, mc,
					       min_t(u32, header[1], PATCH_MAX_SIZE));
			}
		}

		offset = header[1] + SECTION_HDR_SIZE;
		data += offset;
		left -= offset;
	}
}
261 | ||
262 | static bool __init load_builtin_amd_microcode(struct cpio_data *cp, | |
263 | unsigned int family) | |
264 | { | |
265 | #ifdef CONFIG_X86_64 | |
266 | char fw_name[36] = "amd-ucode/microcode_amd.bin"; | |
267 | ||
268 | if (family >= 0x15) | |
269 | snprintf(fw_name, sizeof(fw_name), | |
270 | "amd-ucode/microcode_amd_fam%.2xh.bin", family); | |
271 | ||
272 | return get_builtin_firmware(cp, fw_name); | |
273 | #else | |
274 | return false; | |
275 | #endif | |
276 | } | |
277 | ||
278 | void __init load_ucode_amd_bsp(unsigned int family) | |
279 | { | |
280 | struct cpio_data cp; | |
281 | void **data; | |
282 | size_t *size; | |
283 | ||
284 | #ifdef CONFIG_X86_32 | |
285 | data = (void **)__pa_nodebug(&ucode_cpio.data); | |
286 | size = (size_t *)__pa_nodebug(&ucode_cpio.size); | |
287 | #else | |
288 | data = &ucode_cpio.data; | |
289 | size = &ucode_cpio.size; | |
290 | #endif | |
291 | ||
292 | cp = find_ucode_in_initrd(); | |
293 | if (!cp.data) { | |
294 | if (!load_builtin_amd_microcode(&cp, family)) | |
295 | return; | |
296 | } | |
297 | ||
298 | *data = cp.data; | |
299 | *size = cp.size; | |
300 | ||
301 | apply_ucode_in_initrd(cp.data, cp.size, true); | |
302 | } | |
303 | ||
#ifdef CONFIG_X86_32
/*
 * On 32-bit, since AP's early load occurs before paging is turned on, we
 * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
 * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
 * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
 * which is used upon resume from suspend.
 */
void load_ucode_amd_ap(void)
{
	struct microcode_amd *mc;
	size_t *usize;
	void **ucode;

	/* Fast path: a patch was saved earlier (e.g. across suspend). */
	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
		__apply_microcode_amd(mc);
		return;
	}

	/* Otherwise rescan the container the BSP recorded (phys addresses). */
	ucode = (void *)__pa_nodebug(&container);
	usize = (size_t *)__pa_nodebug(&container_size);

	if (!*ucode || !*usize)
		return;

	apply_ucode_in_initrd(*ucode, *usize, false);
}
332 | ||
333 | static void __init collect_cpu_sig_on_bsp(void *arg) | |
334 | { | |
335 | unsigned int cpu = smp_processor_id(); | |
336 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | |
337 | ||
338 | uci->cpu_sig.sig = cpuid_eax(0x00000001); | |
339 | } | |
340 | ||
341 | static void __init get_bsp_sig(void) | |
342 | { | |
343 | unsigned int bsp = boot_cpu_data.cpu_index; | |
344 | struct ucode_cpu_info *uci = ucode_cpu_info + bsp; | |
345 | ||
346 | if (!uci->cpu_sig.sig) | |
347 | smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1); | |
348 | } | |
349 | #else | |
/*
 * 64-bit AP early loader: reuse the BSP's saved patch when the AP has the
 * same equivalence ID, otherwise rescan the whole blob (mixed steppings).
 */
void load_ucode_amd_ap(void)
{
	unsigned int cpu = smp_processor_id();
	struct equiv_cpu_entry *eq;
	struct microcode_amd *mc;
	u32 rev, eax;
	u16 eq_id;

	/* Exit if called on the BSP. */
	if (!cpu)
		return;

	/* No container was found/recorded during BSP load. */
	if (!container)
		return;

	/*
	 * 64-bit runs with paging enabled, thus early==false.
	 */
	if (check_current_patch_level(&rev, false))
		return;

	eax = cpuid_eax(0x00000001);
	eq  = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);

	eq_id = find_equiv_id(eq, eax);
	if (!eq_id)
		return;

	if (eq_id == this_equiv_id) {
		/* Same stepping as the BSP: its saved patch applies here too. */
		mc = (struct microcode_amd *)amd_ucode_patch;

		if (mc && rev < mc->hdr.patch_id) {
			if (!__apply_microcode_amd(mc))
				ucode_new_rev = mc->hdr.patch_id;
		}

	} else {
		if (!ucode_cpio.data)
			return;

		/*
		 * AP has a different equivalence ID than BSP, looks like
		 * mixed-steppings silicon so go through the ucode blob anew.
		 */
		apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
	}
}
397 | #endif | |
398 | ||
399 | int __init save_microcode_in_initrd_amd(void) | |
400 | { | |
401 | unsigned long cont; | |
402 | int retval = 0; | |
403 | enum ucode_state ret; | |
404 | u8 *cont_va; | |
405 | u32 eax; | |
406 | ||
407 | if (!container) | |
408 | return -EINVAL; | |
409 | ||
410 | #ifdef CONFIG_X86_32 | |
411 | get_bsp_sig(); | |
412 | cont = (unsigned long)container; | |
413 | cont_va = __va(container); | |
414 | #else | |
415 | /* | |
416 | * We need the physical address of the container for both bitness since | |
417 | * boot_params.hdr.ramdisk_image is a physical address. | |
418 | */ | |
419 | cont = __pa(container); | |
420 | cont_va = container; | |
421 | #endif | |
422 | ||
423 | /* | |
424 | * Take into account the fact that the ramdisk might get relocated and | |
425 | * therefore we need to recompute the container's position in virtual | |
426 | * memory space. | |
427 | */ | |
428 | if (relocated_ramdisk) | |
429 | container = (u8 *)(__va(relocated_ramdisk) + | |
430 | (cont - boot_params.hdr.ramdisk_image)); | |
431 | else | |
432 | container = cont_va; | |
433 | ||
434 | if (ucode_new_rev) | |
435 | pr_info("microcode: updated early to new patch_level=0x%08x\n", | |
436 | ucode_new_rev); | |
437 | ||
438 | eax = cpuid_eax(0x00000001); | |
439 | eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); | |
440 | ||
441 | ret = load_microcode_amd(smp_processor_id(), eax, container, container_size); | |
442 | if (ret != UCODE_OK) | |
443 | retval = -EINVAL; | |
444 | ||
445 | /* | |
446 | * This will be freed any msec now, stash patches for the current | |
447 | * family and switch to patch cache for cpu hotplug, etc later. | |
448 | */ | |
449 | container = NULL; | |
450 | container_size = 0; | |
451 | ||
452 | return retval; | |
453 | } | |
454 | ||
455 | void reload_ucode_amd(void) | |
456 | { | |
457 | struct microcode_amd *mc; | |
458 | u32 rev; | |
459 | ||
460 | /* | |
461 | * early==false because this is a syscore ->resume path and by | |
462 | * that time paging is long enabled. | |
463 | */ | |
464 | if (check_current_patch_level(&rev, false)) | |
465 | return; | |
466 | ||
467 | mc = (struct microcode_amd *)amd_ucode_patch; | |
468 | ||
469 | if (mc && rev < mc->hdr.patch_id) { | |
470 | if (!__apply_microcode_amd(mc)) { | |
471 | ucode_new_rev = mc->hdr.patch_id; | |
472 | pr_info("microcode: reload patch_level=0x%08x\n", | |
473 | ucode_new_rev); | |
474 | } | |
475 | } | |
476 | } | |
a76096a6 | 477 | static u16 __find_equiv_id(unsigned int cpu) |
c96d2c09 BP |
478 | { |
479 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | |
a76096a6 | 480 | return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig); |
c96d2c09 BP |
481 | } |
482 | ||
483 | static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu) | |
484 | { | |
485 | int i = 0; | |
486 | ||
487 | BUG_ON(!equiv_cpu_table); | |
488 | ||
489 | while (equiv_cpu_table[i].equiv_cpu != 0) { | |
490 | if (equiv_cpu == equiv_cpu_table[i].equiv_cpu) | |
491 | return equiv_cpu_table[i].installed_cpu; | |
492 | i++; | |
493 | } | |
494 | return 0; | |
495 | } | |
496 | ||
a3eb3b4d BP |
497 | /* |
498 | * a small, trivial cache of per-family ucode patches | |
499 | */ | |
500 | static struct ucode_patch *cache_find_patch(u16 equiv_cpu) | |
501 | { | |
502 | struct ucode_patch *p; | |
503 | ||
504 | list_for_each_entry(p, &pcache, plist) | |
505 | if (p->equiv_cpu == equiv_cpu) | |
506 | return p; | |
507 | return NULL; | |
508 | } | |
509 | ||
/*
 * Insert @new_patch into the cache. An existing entry for the same
 * equivalence ID is replaced only when the new patch level is higher;
 * otherwise @new_patch is dropped (caller transferred ownership).
 */
static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &pcache, plist) {
		if (p->equiv_cpu == new_patch->equiv_cpu) {
			if (p->patch_id >= new_patch->patch_id)
				/* we already have the latest patch */
				return;

			/* Swap in place so the list position is preserved. */
			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
	list_add_tail(&new_patch->plist, &pcache);
}
529 | ||
/* Drop the whole patch cache, freeing every entry and its data blob. */
static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &pcache, plist) {
		/* Unlink without poisoning; p is freed right after. */
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}
540 | ||
541 | static struct ucode_patch *find_patch(unsigned int cpu) | |
542 | { | |
543 | u16 equiv_id; | |
544 | ||
a76096a6 | 545 | equiv_id = __find_equiv_id(cpu); |
a3eb3b4d BP |
546 | if (!equiv_id) |
547 | return NULL; | |
548 | ||
549 | return cache_find_patch(equiv_id); | |
550 | } | |
551 | ||
/*
 * ->collect_cpu_info: fill @csig with @cpu's CPUID(1) signature and the
 * currently running microcode revision. Always returns 0.
 */
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = c->microcode;

	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);

	return 0;
}
573 | ||
/*
 * Validate a section's patch size against the per-family maximum and the
 * bytes remaining in the blob. Returns @patch_size when acceptable, 0 on
 * mismatch.
 */
static unsigned int verify_patch_size(u8 family, u32 patch_size,
				      unsigned int size)
{
	u32 max_size;

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
#define F15H_MPB_MAX_SIZE 4096
#define F16H_MPB_MAX_SIZE 3458

	switch (family) {
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	case 0x15:
		max_size = F15H_MPB_MAX_SIZE;
		break;
	case 0x16:
		max_size = F16H_MPB_MAX_SIZE;
		break;
	default:
		/* All other families use the generic maximum. */
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	}

	if (patch_size > min_t(u32, size, max_size)) {
		pr_err("patch size mismatch\n");
		return 0;
	}

	return patch_size;
}
606 | ||
0399f732 BP |
/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};

/*
 * Check the current patch level on this CPU.
 *
 * @rev: Use it to return the patch level. It is set to 0 in the case of
 * error.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
bool check_current_patch_level(u32 *rev, bool early)
{
	u32 lvl, dummy, i;
	bool ret = false;
	u32 *levels;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	/* Before paging on 32-bit, the table must be read via its phys addr. */
	if (IS_ENABLED(CONFIG_X86_32) && early)
		levels = (u32 *)__pa_nodebug(&final_levels);
	else
		levels = final_levels;

	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i]) {
			/* Final level: report 0 so callers won't update. */
			lvl = 0;
			ret = true;
			break;
		}
	}

	if (rev)
		*rev = lvl;

	return ret;
}
653 | ||
a76096a6 JS |
/*
 * Hand the patch to the CPU via the patch-loader MSR and confirm the
 * update by re-reading the patch level. Returns 0 on success, -1 when
 * the CPU did not accept the patch.
 */
int __apply_microcode_amd(struct microcode_amd *mc_amd)
{
	u32 rev, dummy;

	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);

	/* verify patch application was successful */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev != mc_amd->hdr.patch_id)
		return -1;

	return 0;
}
667 | ||
/*
 * ->apply_microcode: apply the cached patch for @cpu if it is newer than
 * the running revision. Returns 0 on success or no-op, -1 on failure or
 * when the CPU's level is final.
 */
int apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	u32 rev;

	/* Must run on the target CPU. */
	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return 0;

	mc_amd  = p->data;
	uci->mc = p->data;

	if (check_current_patch_level(&rev, false))
		return -1;

	/* need to apply patch? */
	if (rev >= mc_amd->hdr.patch_id) {
		/* Already at (or past) this level: just sync bookkeeping. */
		c->microcode = rev;
		uci->cpu_sig.rev = rev;
		return 0;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return -1;
	}
	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
		mc_amd->hdr.patch_id);

	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
	c->microcode = mc_amd->hdr.patch_id;

	return 0;
}
710 | ||
0657d9eb | 711 | static int install_equiv_cpu_table(const u8 *buf) |
80cc9f10 | 712 | { |
10de52d6 BP |
713 | unsigned int *ibuf = (unsigned int *)buf; |
714 | unsigned int type = ibuf[1]; | |
715 | unsigned int size = ibuf[2]; | |
80cc9f10 | 716 | |
10de52d6 | 717 | if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { |
258721ef BP |
718 | pr_err("empty section/" |
719 | "invalid type field in container file section header\n"); | |
10de52d6 | 720 | return -EINVAL; |
80cc9f10 PO |
721 | } |
722 | ||
8e5e9521 | 723 | equiv_cpu_table = vmalloc(size); |
80cc9f10 | 724 | if (!equiv_cpu_table) { |
f58e1f53 | 725 | pr_err("failed to allocate equivalent CPU table\n"); |
10de52d6 | 726 | return -ENOMEM; |
80cc9f10 PO |
727 | } |
728 | ||
e7e632f5 | 729 | memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size); |
80cc9f10 | 730 | |
40b7f3df BP |
731 | /* add header length */ |
732 | return size + CONTAINER_HDR_SZ; | |
80cc9f10 PO |
733 | } |
734 | ||
/* Release the vmalloc'd equivalence table; vfree(NULL) is a no-op. */
static void free_equiv_cpu_table(void)
{
	vfree(equiv_cpu_table);
	equiv_cpu_table = NULL;
}
80cc9f10 | 740 | |
/* Tear down both the equivalence table and the patch cache. */
static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}
746 | ||
/*
 * Validate one UCODE_UCODE_TYPE section of @fw and, when it belongs to
 * @family, copy it into the patch cache.
 *
 * We return the current size even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error like a memory allocation has failed and the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	unsigned int patch_size, crnt_size, ret;
	u32 proc_fam;
	u16 proc_id;

	patch_size  = *(u32 *)(fw + 4);
	crnt_size   = patch_size + SECTION_HDR_SIZE;
	mc_hdr	    = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id	    = mc_hdr->processor_rev_id;

	proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
	if (!proc_fam) {
		pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
		return crnt_size;
	}

	/* check if patch is for the current family */
	proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
	if (proc_fam != family)
		return crnt_size;

	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
			mc_hdr->patch_id);
		return crnt_size;
	}

	ret = verify_patch_size(family, patch_size, leftover);
	if (!ret) {
		pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
		return crnt_size;
	}

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	patch->data = kzalloc(patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}

	/* All looks ok, copy patch... */
	memcpy(patch->data, fw + SECTION_HDR_SIZE, patch_size);
	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache (cache takes ownership of patch). */
	update_cache(patch);

	return crnt_size;
}
817 | ||
84516098 TK |
/*
 * Parse a whole container blob: install its equivalence table, then walk
 * and cache every patch section for @family.
 */
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
					     size_t size)
{
	enum ucode_state ret = UCODE_ERROR;
	unsigned int leftover;
	u8 *fw = (u8 *)data;
	int crnt_size = 0;
	int offset;

	offset = install_equiv_cpu_table(data);
	if (offset < 0) {
		pr_err("failed to create equivalent cpu table\n");
		return ret;
	}
	fw += offset;
	leftover = size - offset;

	/* The equivalence table must be followed by a patch section. */
	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return ret;
	}

	while (leftover) {
		crnt_size = verify_and_add_patch(family, fw, leftover);
		if (crnt_size < 0)
			return ret;

		fw	 += crnt_size;
		leftover -= crnt_size;
	}

	return UCODE_OK;
}
852 | ||
/*
 * (Re)build the equivalence table and patch cache from @data, and on
 * 32-bit additionally stash the BSP's matching patch in amd_ucode_patch
 * for pre-paging early load.
 */
enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);

	if (ret != UCODE_OK)
		cleanup();

#ifdef CONFIG_X86_32
	/* save BSP's matching patch for early load */
	if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
		struct ucode_patch *p = find_patch(cpu);
		if (p) {
			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
							       PATCH_MAX_SIZE));
		}
	}
#endif
	return ret;
}
878 | ||
5b68edc9 AH |
/*
 * AMD microcode firmware naming convention, up to family 15h they are in
 * the legacy file:
 *
 *    amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, they are in family-specific firmware files:
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * These might be larger than 2K.
 */
static enum ucode_state request_microcode_amd(int cpu, struct device *device,
					      bool refresh_fw)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	/* reload ucode container only on the boot cpu */
	if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
		return UCODE_OK;

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	/* _direct: no fallback to the udev helper. */
	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		pr_debug("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (*(u32 *)fw->data != UCODE_MAGIC) {
		pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
		goto fw_release;
	}

	ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);

 fw_release:
	release_firmware(fw);

 out:
	return ret;
}
929 | ||
871b72dd DA |
/* Loading microcode from a user-supplied buffer is not supported on AMD. */
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	return UCODE_ERROR;
}
935 | ||
80cc9f10 PO |
/* ->microcode_fini_cpu: drop the per-CPU patch pointer (cache keeps the data). */
static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}
942 | ||
/* Ops vector handed to the generic microcode driver core. */
static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user           = request_microcode_user,
	.request_microcode_fw             = request_microcode_amd,
	.collect_cpu_info                 = collect_cpu_info_amd,
	.apply_microcode                  = apply_microcode_amd,
	.microcode_fini_cpu               = microcode_fini_cpu_amd,
};
950 | ||
/*
 * Entry point for the generic driver: return our ops for supported CPUs
 * (AMD family 10h+), NULL otherwise.
 */
struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	return &microcode_amd_ops;
}
f72c1a57 BP |
962 | |
/* Module teardown: free the equivalence table and patch cache. */
void __exit exit_amd_microcode(void)
{
	cleanup();
}