x86, microcode_amd: Change email addresses, MAINTAINERS entry
arch/x86/kernel/microcode_amd.c
/*
 * AMD CPU Microcode Update Driver for Linux
 * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *
 * Author: Peter Oruba <peter.oruba@amd.com>
 *
 * Based on work by:
 * Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *
 * Maintainers:
 * Andreas Herrmann <herrmann.der.user@googlemail.com>
 * Borislav Petkov <bp@alien8.de>
 *
 * This driver allows upgrading microcode on F10h and later AMD CPUs.
 *
 * Licensed under the terms of the GNU General Public
 * License version 2. See file COPYING for details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/firmware.h>
#include <linux/pci_ids.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/msr.h>

MODULE_DESCRIPTION("AMD Microcode Update Driver");
MODULE_AUTHOR("Peter Oruba");
MODULE_LICENSE("GPL v2");

#define UCODE_MAGIC                     0x00414d44
#define UCODE_EQUIV_CPU_TABLE_TYPE      0x00000000
#define UCODE_UCODE_TYPE                0x00000001

struct equiv_cpu_entry {
        u32     installed_cpu;
        u32     fixed_errata_mask;
        u32     fixed_errata_compare;
        u16     equiv_cpu;
        u16     res;
} __attribute__((packed));

struct microcode_header_amd {
        u32     data_code;
        u32     patch_id;
        u16     mc_patch_data_id;
        u8      mc_patch_data_len;
        u8      init_flag;
        u32     mc_patch_data_checksum;
        u32     nb_dev_id;
        u32     sb_dev_id;
        u16     processor_rev_id;
        u8      nb_rev_id;
        u8      sb_rev_id;
        u8      bios_api_rev;
        u8      reserved1[3];
        u32     match_reg[8];
} __attribute__((packed));

struct microcode_amd {
        struct microcode_header_amd     hdr;
        unsigned int                    mpb[0];
};

#define SECTION_HDR_SIZE        8
#define CONTAINER_HDR_SZ        12

static struct equiv_cpu_entry *equiv_cpu_table;

struct ucode_patch {
        struct list_head plist;
        void *data;
        u32 patch_id;
        u16 equiv_cpu;
};

static LIST_HEAD(pcache);

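/*
 * Look up the equivalence ID for @cpu: scan the equivalence table for an
 * entry whose installed_cpu field matches the CPU's CPUID signature and
 * return its equiv_cpu value, or 0 if the table is absent or has no match.
 */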
static u16 find_equiv_id(unsigned int cpu)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        int i = 0;

        if (!equiv_cpu_table)
                return 0;

        while (equiv_cpu_table[i].installed_cpu != 0) {
                if (uci->cpu_sig.sig == equiv_cpu_table[i].installed_cpu)
                        return equiv_cpu_table[i].equiv_cpu;

                i++;
        }
        return 0;
}

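/*
 * Reverse lookup: given an equivalence ID, return the matching installed_cpu
 * (CPUID signature) from the equivalence table, or 0 if none is found.
 */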
static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu)
{
        int i = 0;

        BUG_ON(!equiv_cpu_table);

        while (equiv_cpu_table[i].equiv_cpu != 0) {
                if (equiv_cpu == equiv_cpu_table[i].equiv_cpu)
                        return equiv_cpu_table[i].installed_cpu;
                i++;
        }
        return 0;
}

/*
 * a small, trivial cache of per-family ucode patches
 */
static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{
        struct ucode_patch *p;

        list_for_each_entry(p, &pcache, plist)
                if (p->equiv_cpu == equiv_cpu)
                        return p;
        return NULL;
}

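/*
 * Insert @new_patch into the cache, or replace an existing entry for the
 * same equivalence ID if the new patch carries a higher patch_id.
 */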
static void update_cache(struct ucode_patch *new_patch)
{
        struct ucode_patch *p;

        list_for_each_entry(p, &pcache, plist) {
                if (p->equiv_cpu == new_patch->equiv_cpu) {
                        if (p->patch_id >= new_patch->patch_id)
                                /* we already have the latest patch */
                                return;

                        list_replace(&p->plist, &new_patch->plist);
                        kfree(p->data);
                        kfree(p);
                        return;
                }
        }
        /* no patch found, add it */
        list_add_tail(&new_patch->plist, &pcache);
}

static void free_cache(void)
{
        struct ucode_patch *p, *tmp;

        list_for_each_entry_safe(p, tmp, &pcache, plist) {
                __list_del(p->plist.prev, p->plist.next);
                kfree(p->data);
                kfree(p);
        }
}

static struct ucode_patch *find_patch(unsigned int cpu)
{
        u16 equiv_id;

        equiv_id = find_equiv_id(cpu);
        if (!equiv_id)
                return NULL;

        return cache_find_patch(equiv_id);
}

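/*
 * Report the CPU's signature (CPUID function 1, EAX) and the currently
 * applied microcode patch level for @cpu.
 */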
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        csig->sig = cpuid_eax(0x00000001);
        csig->rev = c->microcode;
        pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);

        return 0;
}

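/*
 * Sanity-check the size of a single patch against the per-family maximum
 * and against the data remaining in the container. Returns the patch size
 * on success, 0 if the size is implausible.
 */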
static unsigned int verify_patch_size(int cpu, u32 patch_size,
                                      unsigned int size)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        u32 max_size;

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
#define F15H_MPB_MAX_SIZE 4096

        switch (c->x86) {
        case 0x14:
                max_size = F14H_MPB_MAX_SIZE;
                break;
        case 0x15:
                max_size = F15H_MPB_MAX_SIZE;
                break;
        default:
                max_size = F1XH_MPB_MAX_SIZE;
                break;
        }

        if (patch_size > min_t(u32, size, max_size)) {
                pr_err("patch size mismatch\n");
                return 0;
        }

        return patch_size;
}

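/*
 * Apply the cached patch for @cpu, if any. The current patch level is read
 * from MSR_AMD64_PATCH_LEVEL; if it is already at or above the cached
 * patch's patch_id, nothing is done. Otherwise the patch is handed to the
 * CPU by writing its address to MSR_AMD64_PATCH_LOADER, and the patch
 * level is re-read to verify that the update took effect.
 */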
static int apply_microcode_amd(int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct microcode_amd *mc_amd;
        struct ucode_cpu_info *uci;
        struct ucode_patch *p;
        u32 rev, dummy;

        BUG_ON(raw_smp_processor_id() != cpu);

        uci = ucode_cpu_info + cpu;

        p = find_patch(cpu);
        if (!p)
                return 0;

        mc_amd = p->data;
        uci->mc = p->data;

        rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

        /* need to apply patch? */
        if (rev >= mc_amd->hdr.patch_id) {
                c->microcode = rev;
                return 0;
        }

        wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);

        /* verify patch application was successful */
        rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
        if (rev != mc_amd->hdr.patch_id) {
                pr_err("CPU%d: update failed for patch_level=0x%08x\n",
                       cpu, mc_amd->hdr.patch_id);
                return -1;
        }

        pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
        uci->cpu_sig.rev = rev;
        c->microcode = rev;

        return 0;
}

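/*
 * Parse the container header at @buf and copy the equivalence table that
 * follows it into a vmalloc'd buffer. Returns the number of bytes consumed
 * (table size plus container header) or a negative error code.
 */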
static int install_equiv_cpu_table(const u8 *buf)
{
        unsigned int *ibuf = (unsigned int *)buf;
        unsigned int type = ibuf[1];
        unsigned int size = ibuf[2];

        if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
                pr_err("empty section/"
                       "invalid type field in container file section header\n");
                return -EINVAL;
        }

        equiv_cpu_table = vmalloc(size);
        if (!equiv_cpu_table) {
                pr_err("failed to allocate equivalent CPU table\n");
                return -ENOMEM;
        }

        memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);

        /* add header length */
        return size + CONTAINER_HDR_SZ;
}

static void free_equiv_cpu_table(void)
{
        vfree(equiv_cpu_table);
        equiv_cpu_table = NULL;
}

static void cleanup(void)
{
        free_equiv_cpu_table();
        free_cache();
}

/*
 * Return the section size even if some of the checks fail, so that the
 * caller can skip over this patch and move on to the next one. A negative
 * return value signals a grave error, such as a failed memory allocation,
 * after which the driver cannot continue functioning normally. In that
 * case we tear down everything we've set up so far and exit.
 */
static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct microcode_header_amd *mc_hdr;
        struct ucode_patch *patch;
        unsigned int patch_size, crnt_size, ret;
        u32 proc_fam;
        u16 proc_id;

        patch_size  = *(u32 *)(fw + 4);
        crnt_size   = patch_size + SECTION_HDR_SIZE;
        mc_hdr      = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
        proc_id     = mc_hdr->processor_rev_id;

        proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
        if (!proc_fam) {
                pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
                return crnt_size;
        }

        /* check if patch is for the current family */
        proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
        if (proc_fam != c->x86)
                return crnt_size;

        if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
                pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
                       mc_hdr->patch_id);
                return crnt_size;
        }

        ret = verify_patch_size(cpu, patch_size, leftover);
        if (!ret) {
                pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
                return crnt_size;
        }

        patch = kzalloc(sizeof(*patch), GFP_KERNEL);
        if (!patch) {
                pr_err("Patch allocation failure.\n");
                return -EINVAL;
        }

        patch->data = kzalloc(patch_size, GFP_KERNEL);
        if (!patch->data) {
                pr_err("Patch data allocation failure.\n");
                kfree(patch);
                return -EINVAL;
        }

        /* All looks ok, copy patch... */
        memcpy(patch->data, fw + SECTION_HDR_SIZE, patch_size);
        INIT_LIST_HEAD(&patch->plist);
        patch->patch_id  = mc_hdr->patch_id;
        patch->equiv_cpu = proc_id;

        /* ... and add to cache. */
        update_cache(patch);

        return crnt_size;
}

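/*
 * Parse a complete container image: install the equivalence table from its
 * head, then walk the remaining sections and feed each patch to
 * verify_and_add_patch() until the buffer is exhausted.
 */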
static enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size)
{
        enum ucode_state ret = UCODE_ERROR;
        unsigned int leftover;
        u8 *fw = (u8 *)data;
        int crnt_size = 0;
        int offset;

        offset = install_equiv_cpu_table(data);
        if (offset < 0) {
                pr_err("failed to create equivalent cpu table\n");
                return ret;
        }
        fw += offset;
        leftover = size - offset;

        if (*(u32 *)fw != UCODE_UCODE_TYPE) {
                pr_err("invalid type field in container file section header\n");
                free_equiv_cpu_table();
                return ret;
        }

        while (leftover) {
                crnt_size = verify_and_add_patch(cpu, fw, leftover);
                if (crnt_size < 0)
                        return ret;

                fw += crnt_size;
                leftover -= crnt_size;
        }

        return UCODE_OK;
}

/*
 * AMD microcode firmware naming convention: up to (but not including)
 * family 15h, microcode is carried in the legacy file:
 *
 * amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, microcode is carried in family-specific
 * firmware files:
 *
 * amd-ucode/microcode_amd_fam15h.bin
 * amd-ucode/microcode_amd_fam16h.bin
 * ...
 *
 * These might be larger than 2K.
 */
static enum ucode_state request_microcode_amd(int cpu, struct device *device,
                                              bool refresh_fw)
{
        char fw_name[36] = "amd-ucode/microcode_amd.bin";
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        enum ucode_state ret = UCODE_NFOUND;
        const struct firmware *fw;

        /* reload ucode container only on the boot cpu */
        if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
                return UCODE_OK;

        if (c->x86 >= 0x15)
                snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

        if (request_firmware(&fw, (const char *)fw_name, device)) {
                pr_err("failed to load file %s\n", fw_name);
                goto out;
        }

        ret = UCODE_ERROR;
        if (*(u32 *)fw->data != UCODE_MAGIC) {
                pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
                goto fw_release;
        }

        /* free old equiv table */
        free_equiv_cpu_table();

        ret = load_microcode_amd(cpu, fw->data, fw->size);
        if (ret != UCODE_OK)
                cleanup();

 fw_release:
        release_firmware(fw);

 out:
        return ret;
}

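/*
 * Loading microcode from a user-supplied buffer (the old user-space
 * interface) is not supported; AMD microcode is loaded only via the
 * request_firmware() path above.
 */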
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
        return UCODE_ERROR;
}

static void microcode_fini_cpu_amd(int cpu)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

        uci->mc = NULL;
}

static struct microcode_ops microcode_amd_ops = {
        .request_microcode_user           = request_microcode_user,
        .request_microcode_fw             = request_microcode_amd,
        .collect_cpu_info                 = collect_cpu_info_amd,
        .apply_microcode                  = apply_microcode_amd,
        .microcode_fini_cpu               = microcode_fini_cpu_amd,
};

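/*
 * Entry point used by the microcode core: hand back the AMD ops if this is
 * an AMD CPU of family 10h or newer, otherwise refuse to load.
 */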
struct microcode_ops * __init init_amd_microcode(void)
{
        struct cpuinfo_x86 *c = &cpu_data(0);

        if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
                pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
                return NULL;
        }

        return &microcode_amd_ops;
}

void __exit exit_amd_microcode(void)
{
        cleanup();
}