iommu/vt-d: fix invalid memory access when freeing DMAR irq
[deliverable/linux.git] / drivers / iommu / dmar.c
10e5247f
KA
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
98bcef56 17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
10e5247f 21 *
e61d98d8 22 * This file implements early detection/parsing of Remapping Devices
10e5247f
KA
23 * reported to the OS through the BIOS via DMA remapping reporting (DMAR) ACPI
24 * tables.
e61d98d8
SS
25 *
26 * These routines are used by both DMA-remapping and Interrupt-remapping
10e5247f
KA
27 */
28
e9071b0b
DD
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
30
10e5247f
KA
31#include <linux/pci.h>
32#include <linux/dmar.h>
38717946
KA
33#include <linux/iova.h>
34#include <linux/intel-iommu.h>
fe962e90 35#include <linux/timer.h>
0ac2491f
SS
36#include <linux/irq.h>
37#include <linux/interrupt.h>
69575d38 38#include <linux/tboot.h>
eb27cae8 39#include <linux/dmi.h>
5a0e3ad6 40#include <linux/slab.h>
8a8f422d 41#include <asm/irq_remapping.h>
4db77ff3 42#include <asm/iommu_table.h>
10e5247f 43
078e1ee2
JR
44#include "irq_remapping.h"
45
10e5247f
KA
46/* No locks are needed, as the DMA remapping hardware unit
47 * list is constructed at boot time and hotplug of
48 * these units is not supported by the architecture.
49 */
50LIST_HEAD(dmar_drhd_units);
10e5247f 51
41750d31 52struct acpi_table_header * __initdata dmar_tbl;
8e1568f3 53static acpi_size dmar_tbl_size;
10e5247f 54
694835dc
JL
55static int alloc_iommu(struct dmar_drhd_unit *drhd);
56
10e5247f
KA
57static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
58{
59 /*
60 * add INCLUDE_ALL at the tail, so a scan of the list will find it
61 * at the very end.
62 */
63 if (drhd->include_all)
64 list_add_tail(&drhd->list, &dmar_drhd_units);
65 else
66 list_add(&drhd->list, &dmar_drhd_units);
67}
68
10e5247f
KA
69static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
70 struct pci_dev **dev, u16 segment)
71{
72 struct pci_bus *bus;
73 struct pci_dev *pdev = NULL;
74 struct acpi_dmar_pci_path *path;
75 int count;
76
77 bus = pci_find_bus(segment, scope->bus);
78 path = (struct acpi_dmar_pci_path *)(scope + 1);
79 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
80 / sizeof(struct acpi_dmar_pci_path);
81
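 /*
 * Walk the ACPI device-scope path: each acpi_dmar_pci_path entry names a
 * (device, function) under the current bus; descend through bridges via
 * pdev->subordinate until the final entry is reached.
 */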
82 while (count) {
83 if (pdev)
84 pci_dev_put(pdev);
85 /*
86 * Some BIOSes list non-existent devices in the DMAR table; just
87 * ignore them.
88 */
89 if (!bus) {
e9071b0b 90 pr_warn("Device scope bus [%d] not found\n", scope->bus);
10e5247f
KA
91 break;
92 }
fa5f508f 93 pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
10e5247f 94 if (!pdev) {
e9071b0b 95 /* warning will be printed below */
10e5247f
KA
96 break;
97 }
98 path++;
99 count--;
100 bus = pdev->subordinate;
101 }
102 if (!pdev) {
e9071b0b 103 pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
fa5f508f 104 segment, scope->bus, path->device, path->function);
10e5247f
KA
105 return 0;
106 }
107 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
108 pdev->subordinate) || (scope->entry_type == \
109 ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
110 pci_dev_put(pdev);
e9071b0b
DD
111 pr_warn("Device scope type does not match for %s\n",
112 pci_name(pdev));
10e5247f
KA
113 return -EINVAL;
114 }
115 *dev = pdev;
116 return 0;
117}
118
318fe7df
SS
119int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
120 struct pci_dev ***devices, u16 segment)
10e5247f
KA
121{
122 struct acpi_dmar_device_scope *scope;
123 void *tmp = start;
124 int index;
125 int ret;
126
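 /*
 * Two passes over the scope entries: the first pass counts endpoint and
 * bridge entries so the device array can be sized, the second fills it in
 * via dmar_parse_one_dev_scope().
 */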
127 *cnt = 0;
128 while (start < end) {
129 scope = start;
130 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
131 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
132 (*cnt)++;
ae3e7f3a
LC
133 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
134 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
e9071b0b 135 pr_warn("Unsupported device scope\n");
5715f0f9 136 }
10e5247f
KA
137 start += scope->length;
138 }
139 if (*cnt == 0)
140 return 0;
141
142 *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
143 if (!*devices)
144 return -ENOMEM;
145
146 start = tmp;
147 index = 0;
148 while (start < end) {
149 scope = start;
150 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
151 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
152 ret = dmar_parse_one_dev_scope(scope,
153 &(*devices)[index], segment);
154 if (ret) {
ada4d4b2 155 dmar_free_dev_scope(devices, cnt);
10e5247f
KA
156 return ret;
157 }
158 index ++;
159 }
160 start += scope->length;
161 }
162
163 return 0;
164}
165
ada4d4b2
JL
166void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
167{
168 if (*devices && *cnt) {
169 while (--*cnt >= 0)
170 pci_dev_put((*devices)[*cnt]);
171 kfree(*devices);
172 *devices = NULL;
173 *cnt = 0;
174 }
175}
176
10e5247f
KA
177/**
178 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
179 * structure which uniquely represents one DMA remapping hardware unit
180 * present in the platform
181 */
182static int __init
183dmar_parse_one_drhd(struct acpi_dmar_header *header)
184{
185 struct acpi_dmar_hardware_unit *drhd;
186 struct dmar_drhd_unit *dmaru;
187 int ret = 0;
10e5247f 188
e523b38e 189 drhd = (struct acpi_dmar_hardware_unit *)header;
10e5247f
KA
190 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
191 if (!dmaru)
192 return -ENOMEM;
193
1886e8a9 194 dmaru->hdr = header;
10e5247f 195 dmaru->reg_base_addr = drhd->address;
276dbf99 196 dmaru->segment = drhd->segment;
10e5247f
KA
197 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
198
1886e8a9
SS
199 ret = alloc_iommu(dmaru);
200 if (ret) {
201 kfree(dmaru);
202 return ret;
203 }
204 dmar_register_drhd_unit(dmaru);
205 return 0;
206}
207
f82851a8 208static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
1886e8a9
SS
209{
210 struct acpi_dmar_hardware_unit *drhd;
f82851a8 211 int ret = 0;
1886e8a9
SS
212
213 drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
214
2e824f79
YZ
215 if (dmaru->include_all)
216 return 0;
217
218 ret = dmar_parse_dev_scope((void *)(drhd + 1),
1886e8a9 219 ((void *)drhd) + drhd->header.length,
10e5247f
KA
220 &dmaru->devices_cnt, &dmaru->devices,
221 drhd->segment);
1c7d1bca 222 if (ret) {
1886e8a9 223 list_del(&dmaru->list);
10e5247f 224 kfree(dmaru);
1886e8a9 225 }
10e5247f
KA
226 return ret;
227}
228
aa697079 229#ifdef CONFIG_ACPI_NUMA
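/*
 * An RHSA structure associates a DRHD (identified by its register base
 * address) with an ACPI proximity domain; record the corresponding NUMA
 * node on the IOMMU so later allocations can be node-local.
 */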
ee34b32d
SS
230static int __init
231dmar_parse_one_rhsa(struct acpi_dmar_header *header)
232{
233 struct acpi_dmar_rhsa *rhsa;
234 struct dmar_drhd_unit *drhd;
235
236 rhsa = (struct acpi_dmar_rhsa *)header;
aa697079 237 for_each_drhd_unit(drhd) {
ee34b32d
SS
238 if (drhd->reg_base_addr == rhsa->base_address) {
239 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
240
241 if (!node_online(node))
242 node = -1;
243 drhd->iommu->node = node;
aa697079
DW
244 return 0;
245 }
ee34b32d 246 }
fd0c8894
BH
247 WARN_TAINT(
248 1, TAINT_FIRMWARE_WORKAROUND,
249 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
250 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
251 drhd->reg_base_addr,
252 dmi_get_system_info(DMI_BIOS_VENDOR),
253 dmi_get_system_info(DMI_BIOS_VERSION),
254 dmi_get_system_info(DMI_PRODUCT_VERSION));
ee34b32d 255
aa697079 256 return 0;
ee34b32d 257}
aa697079 258#endif
ee34b32d 259
10e5247f
KA
260static void __init
261dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
262{
263 struct acpi_dmar_hardware_unit *drhd;
264 struct acpi_dmar_reserved_memory *rmrr;
aa5d2b51 265 struct acpi_dmar_atsr *atsr;
17b60977 266 struct acpi_dmar_rhsa *rhsa;
10e5247f
KA
267
268 switch (header->type) {
269 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
aa5d2b51
YZ
270 drhd = container_of(header, struct acpi_dmar_hardware_unit,
271 header);
e9071b0b 272 pr_info("DRHD base: %#016Lx flags: %#x\n",
aa5d2b51 273 (unsigned long long)drhd->address, drhd->flags);
10e5247f
KA
274 break;
275 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
aa5d2b51
YZ
276 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
277 header);
e9071b0b 278 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
5b6985ce
FY
279 (unsigned long long)rmrr->base_address,
280 (unsigned long long)rmrr->end_address);
10e5247f 281 break;
aa5d2b51
YZ
282 case ACPI_DMAR_TYPE_ATSR:
283 atsr = container_of(header, struct acpi_dmar_atsr, header);
e9071b0b 284 pr_info("ATSR flags: %#x\n", atsr->flags);
aa5d2b51 285 break;
17b60977
RD
286 case ACPI_DMAR_HARDWARE_AFFINITY:
287 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
e9071b0b 288 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
17b60977
RD
289 (unsigned long long)rhsa->base_address,
290 rhsa->proximity_domain);
291 break;
10e5247f
KA
292 }
293}
294
f6dd5c31
YL
295/**
296 * dmar_table_detect - checks to see if the platform supports DMAR devices
297 */
298static int __init dmar_table_detect(void)
299{
300 acpi_status status = AE_OK;
301
302 /* if we can find the DMAR table, then there are DMAR devices */
8e1568f3
YL
303 status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
304 (struct acpi_table_header **)&dmar_tbl,
305 &dmar_tbl_size);
f6dd5c31
YL
306
307 if (ACPI_SUCCESS(status) && !dmar_tbl) {
e9071b0b 308 pr_warn("Unable to map DMAR\n");
f6dd5c31
YL
309 status = AE_NOT_FOUND;
310 }
311
312 return (ACPI_SUCCESS(status) ? 1 : 0);
313}
aaa9d1dd 314
10e5247f
KA
315/**
316 * parse_dmar_table - parses the DMA reporting table
317 */
318static int __init
319parse_dmar_table(void)
320{
321 struct acpi_table_dmar *dmar;
322 struct acpi_dmar_header *entry_header;
323 int ret = 0;
7cef3347 324 int drhd_count = 0;
10e5247f 325
f6dd5c31
YL
326 /*
327 * Do it again; the earlier dmar_tbl mapping could have been done
328 * with the fixed map.
329 */
330 dmar_table_detect();
331
a59b50e9
JC
332 /*
333 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
334 * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
335 */
336 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
337
10e5247f
KA
338 dmar = (struct acpi_table_dmar *)dmar_tbl;
339 if (!dmar)
340 return -ENODEV;
341
5b6985ce 342 if (dmar->width < PAGE_SHIFT - 1) {
e9071b0b 343 pr_warn("Invalid DMAR haw\n");
10e5247f
KA
344 return -EINVAL;
345 }
346
e9071b0b 347 pr_info("Host address width %d\n", dmar->width + 1);
10e5247f
KA
348
349 entry_header = (struct acpi_dmar_header *)(dmar + 1);
350 while (((unsigned long)entry_header) <
351 (((unsigned long)dmar) + dmar_tbl->length)) {
084eb960
TB
352 /* Avoid looping forever on bad ACPI tables */
353 if (entry_header->length == 0) {
e9071b0b 354 pr_warn("Invalid 0-length structure\n");
084eb960
TB
355 ret = -EINVAL;
356 break;
357 }
358
10e5247f
KA
359 dmar_table_print_dmar_entry(entry_header);
360
361 switch (entry_header->type) {
362 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
7cef3347 363 drhd_count++;
10e5247f
KA
364 ret = dmar_parse_one_drhd(entry_header);
365 break;
366 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
367 ret = dmar_parse_one_rmrr(entry_header);
aa5d2b51
YZ
368 break;
369 case ACPI_DMAR_TYPE_ATSR:
aa5d2b51 370 ret = dmar_parse_one_atsr(entry_header);
10e5247f 371 break;
17b60977 372 case ACPI_DMAR_HARDWARE_AFFINITY:
aa697079 373#ifdef CONFIG_ACPI_NUMA
ee34b32d 374 ret = dmar_parse_one_rhsa(entry_header);
aa697079 375#endif
17b60977 376 break;
10e5247f 377 default:
e9071b0b 378 pr_warn("Unknown DMAR structure type %d\n",
4de75cf9 379 entry_header->type);
10e5247f
KA
380 ret = 0; /* for forward compatibility */
381 break;
382 }
383 if (ret)
384 break;
385
386 entry_header = ((void *)entry_header + entry_header->length);
387 }
7cef3347
LZH
388 if (drhd_count == 0)
389 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
10e5247f
KA
390 return ret;
391}
392
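/*
 * A device matches a DRHD scope if the device itself, or any bridge above
 * it, appears in the scope's device list; dev->bus->self walks up towards
 * the root.
 */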
dda56549 393static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
e61d98d8
SS
394 struct pci_dev *dev)
395{
396 int index;
397
398 while (dev) {
399 for (index = 0; index < cnt; index++)
400 if (dev == devices[index])
401 return 1;
402
403 /* Check our parent */
404 dev = dev->bus->self;
405 }
406
407 return 0;
408}
409
410struct dmar_drhd_unit *
411dmar_find_matched_drhd_unit(struct pci_dev *dev)
412{
2e824f79
YZ
413 struct dmar_drhd_unit *dmaru = NULL;
414 struct acpi_dmar_hardware_unit *drhd;
415
dda56549
Y
416 dev = pci_physfn(dev);
417
8b161f0e 418 for_each_drhd_unit(dmaru) {
2e824f79
YZ
419 drhd = container_of(dmaru->hdr,
420 struct acpi_dmar_hardware_unit,
421 header);
422
423 if (dmaru->include_all &&
424 drhd->segment == pci_domain_nr(dev->bus))
425 return dmaru;
e61d98d8 426
2e824f79
YZ
427 if (dmar_pci_device_match(dmaru->devices,
428 dmaru->devices_cnt, dev))
429 return dmaru;
e61d98d8
SS
430 }
431
432 return NULL;
433}
434
1886e8a9
SS
435int __init dmar_dev_scope_init(void)
436{
c2c7286a 437 static int dmar_dev_scope_initialized;
04e2ea67 438 struct dmar_drhd_unit *drhd, *drhd_n;
1886e8a9
SS
439 int ret = -ENODEV;
440
c2c7286a
SS
441 if (dmar_dev_scope_initialized)
442 return dmar_dev_scope_initialized;
443
318fe7df
SS
444 if (list_empty(&dmar_drhd_units))
445 goto fail;
446
04e2ea67 447 list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
1886e8a9
SS
448 ret = dmar_parse_dev(drhd);
449 if (ret)
c2c7286a 450 goto fail;
1886e8a9
SS
451 }
452
318fe7df
SS
453 ret = dmar_parse_rmrr_atsr_dev();
454 if (ret)
455 goto fail;
1886e8a9 456
c2c7286a
SS
457 dmar_dev_scope_initialized = 1;
458 return 0;
459
460fail:
461 dmar_dev_scope_initialized = ret;
1886e8a9
SS
462 return ret;
463}
464
10e5247f
KA
465
466int __init dmar_table_init(void)
467{
1886e8a9 468 static int dmar_table_initialized;
093f87d2
FY
469 int ret;
470
1886e8a9
SS
471 if (dmar_table_initialized)
472 return 0;
473
474 dmar_table_initialized = 1;
475
093f87d2
FY
476 ret = parse_dmar_table();
477 if (ret) {
1886e8a9 478 if (ret != -ENODEV)
e9071b0b 479 pr_info("parse DMAR table failure.\n");
093f87d2
FY
480 return ret;
481 }
482
10e5247f 483 if (list_empty(&dmar_drhd_units)) {
e9071b0b 484 pr_info("No DMAR devices found\n");
10e5247f
KA
485 return -ENODEV;
486 }
093f87d2 487
10e5247f
KA
488 return 0;
489}
490
3a8663ee
BH
491static void warn_invalid_dmar(u64 addr, const char *message)
492{
fd0c8894
BH
493 WARN_TAINT_ONCE(
494 1, TAINT_FIRMWARE_WORKAROUND,
495 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
496 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
497 addr, message,
498 dmi_get_system_info(DMI_BIOS_VENDOR),
499 dmi_get_system_info(DMI_BIOS_VERSION),
500 dmi_get_system_info(DMI_PRODUCT_VERSION));
3a8663ee 501}
6ecbf01c 502
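/*
 * Sanity-check each DRHD before trusting the table: a zero register base
 * address, or CAP/ECAP registers reading back as all ones, indicates the
 * BIOS reported a bogus or inaccessible unit.
 */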
21004dcd 503static int __init check_zero_address(void)
86cf898e
DW
504{
505 struct acpi_table_dmar *dmar;
506 struct acpi_dmar_header *entry_header;
507 struct acpi_dmar_hardware_unit *drhd;
508
509 dmar = (struct acpi_table_dmar *)dmar_tbl;
510 entry_header = (struct acpi_dmar_header *)(dmar + 1);
511
512 while (((unsigned long)entry_header) <
513 (((unsigned long)dmar) + dmar_tbl->length)) {
514 /* Avoid looping forever on bad ACPI tables */
515 if (entry_header->length == 0) {
e9071b0b 516 pr_warn("Invalid 0-length structure\n");
86cf898e
DW
517 return 0;
518 }
519
520 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
2c992208
CW
521 void __iomem *addr;
522 u64 cap, ecap;
523
86cf898e
DW
524 drhd = (void *)entry_header;
525 if (!drhd->address) {
3a8663ee 526 warn_invalid_dmar(0, "");
2c992208
CW
527 goto failed;
528 }
529
530 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
531 if (!addr) {
532 printk("IOMMU: can't validate: %llx\n", drhd->address);
533 goto failed;
534 }
535 cap = dmar_readq(addr + DMAR_CAP_REG);
536 ecap = dmar_readq(addr + DMAR_ECAP_REG);
537 early_iounmap(addr, VTD_PAGE_SIZE);
538 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
3a8663ee
BH
539 warn_invalid_dmar(drhd->address,
540 " returns all ones");
2c992208 541 goto failed;
86cf898e 542 }
86cf898e
DW
543 }
544
545 entry_header = ((void *)entry_header + entry_header->length);
546 }
547 return 1;
2c992208
CW
548
549failed:
2c992208 550 return 0;
86cf898e
DW
551}
552
480125ba 553int __init detect_intel_iommu(void)
2ae21010
SS
554{
555 int ret;
556
f6dd5c31 557 ret = dmar_table_detect();
86cf898e
DW
558 if (ret)
559 ret = check_zero_address();
2ae21010 560 {
11bd04f6 561 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
2ae21010 562 iommu_detected = 1;
5d990b62
CW
563 /* Make sure ACS will be enabled */
564 pci_request_acs();
565 }
f5d1b97b 566
9d5ce73a
FT
567#ifdef CONFIG_X86
568 if (ret)
569 x86_init.iommu.iommu_init = intel_iommu_init;
2ae21010 570#endif
cacd4213 571 }
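 /*
 * The table was mapped via the early fixmap above; drop that mapping here,
 * parse_dmar_table() maps it again later through dmar_table_detect().
 */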
8e1568f3 572 early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
f6dd5c31 573 dmar_tbl = NULL;
480125ba 574
4db77ff3 575 return ret ? 1 : -ENODEV;
2ae21010
SS
576}
577
578
6f5cf521
DD
579static void unmap_iommu(struct intel_iommu *iommu)
580{
581 iounmap(iommu->reg);
582 release_mem_region(iommu->reg_phys, iommu->reg_size);
583}
584
585/**
586 * map_iommu: map the iommu's registers
587 * @iommu: the iommu to map
588 * @phys_addr: the physical address of the base register
e9071b0b 589 *
6f5cf521 590 * Memory map the iommu's registers. Start with a single page, and
e9071b0b 591 * possibly expand if that turns out to be insufficient.
6f5cf521
DD
592 */
593static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
594{
595 int map_size, err = 0;
596
597 iommu->reg_phys = phys_addr;
598 iommu->reg_size = VTD_PAGE_SIZE;
599
600 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
601 pr_err("IOMMU: can't reserve memory\n");
602 err = -EBUSY;
603 goto out;
604 }
605
606 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
607 if (!iommu->reg) {
608 pr_err("IOMMU: can't map the region\n");
609 err = -ENOMEM;
610 goto release;
611 }
612
613 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
614 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
615
616 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
617 err = -EINVAL;
618 warn_invalid_dmar(phys_addr, " returns all ones");
619 goto unmap;
620 }
621
622 /* the registers might be more than one page */
623 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
624 cap_max_fault_reg_offset(iommu->cap));
625 map_size = VTD_PAGE_ALIGN(map_size);
626 if (map_size > iommu->reg_size) {
627 iounmap(iommu->reg);
628 release_mem_region(iommu->reg_phys, iommu->reg_size);
629 iommu->reg_size = map_size;
630 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
631 iommu->name)) {
632 pr_err("IOMMU: can't reserve memory\n");
633 err = -EBUSY;
634 goto out;
635 }
636 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
637 if (!iommu->reg) {
638 pr_err("IOMMU: can't map the region\n");
639 err = -ENOMEM;
640 goto release;
641 }
642 }
643 err = 0;
644 goto out;
645
646unmap:
647 iounmap(iommu->reg);
648release:
649 release_mem_region(iommu->reg_phys, iommu->reg_size);
650out:
651 return err;
652}
653
694835dc 654static int alloc_iommu(struct dmar_drhd_unit *drhd)
e61d98d8 655{
c42d9f32 656 struct intel_iommu *iommu;
3a93c841 657 u32 ver, sts;
c42d9f32 658 static int iommu_allocated = 0;
43f7392b 659 int agaw = 0;
4ed0d3e6 660 int msagaw = 0;
6f5cf521 661 int err;
c42d9f32 662
6ecbf01c 663 if (!drhd->reg_base_addr) {
3a8663ee 664 warn_invalid_dmar(0, "");
6ecbf01c
DW
665 return -EINVAL;
666 }
667
c42d9f32
SS
668 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
669 if (!iommu)
1886e8a9 670 return -ENOMEM;
c42d9f32
SS
671
672 iommu->seq_id = iommu_allocated++;
9d783ba0 673 sprintf(iommu->name, "dmar%d", iommu->seq_id);
e61d98d8 674
6f5cf521
DD
675 err = map_iommu(iommu, drhd->reg_base_addr);
676 if (err) {
677 pr_err("IOMMU: failed to map %s\n", iommu->name);
e61d98d8
SS
678 goto error;
679 }
0815565a 680
6f5cf521 681 err = -EINVAL;
1b573683
WH
682 agaw = iommu_calculate_agaw(iommu);
683 if (agaw < 0) {
bf947fcb
DD
684 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
685 iommu->seq_id);
0815565a 686 goto err_unmap;
4ed0d3e6
FY
687 }
688 msagaw = iommu_calculate_max_sagaw(iommu);
689 if (msagaw < 0) {
bf947fcb 690 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1b573683 691 iommu->seq_id);
0815565a 692 goto err_unmap;
1b573683
WH
693 }
694 iommu->agaw = agaw;
4ed0d3e6 695 iommu->msagaw = msagaw;
1b573683 696
ee34b32d
SS
697 iommu->node = -1;
698
e61d98d8 699 ver = readl(iommu->reg + DMAR_VER_REG);
680a7524
YL
700 pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
701 iommu->seq_id,
5b6985ce
FY
702 (unsigned long long)drhd->reg_base_addr,
703 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
704 (unsigned long long)iommu->cap,
705 (unsigned long long)iommu->ecap);
e61d98d8 706
3a93c841
TI
707 /* Reflect status in gcmd */
708 sts = readl(iommu->reg + DMAR_GSTS_REG);
709 if (sts & DMA_GSTS_IRES)
710 iommu->gcmd |= DMA_GCMD_IRE;
711 if (sts & DMA_GSTS_TES)
712 iommu->gcmd |= DMA_GCMD_TE;
713 if (sts & DMA_GSTS_QIES)
714 iommu->gcmd |= DMA_GCMD_QIE;
715
1f5b3c3f 716 raw_spin_lock_init(&iommu->register_lock);
e61d98d8
SS
717
718 drhd->iommu = iommu;
1886e8a9 719 return 0;
0815565a
DW
720
721 err_unmap:
6f5cf521 722 unmap_iommu(iommu);
0815565a 723 error:
e61d98d8 724 kfree(iommu);
6f5cf521 725 return err;
e61d98d8
SS
726}
727
728void free_iommu(struct intel_iommu *iommu)
729{
730 if (!iommu)
731 return;
732
e61d98d8 733 free_dmar_iommu(iommu);
e61d98d8
SS
734
735 if (iommu->reg)
6f5cf521
DD
736 unmap_iommu(iommu);
737
e61d98d8
SS
738 kfree(iommu);
739}
fe962e90
SS
740
741/*
742 * Reclaim all the submitted descriptors which have completed their work.
743 */
744static inline void reclaim_free_desc(struct q_inval *qi)
745{
6ba6c3a4
YZ
746 while (qi->desc_status[qi->free_tail] == QI_DONE ||
747 qi->desc_status[qi->free_tail] == QI_ABORT) {
fe962e90
SS
748 qi->desc_status[qi->free_tail] = QI_FREE;
749 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
750 qi->free_cnt++;
751 }
752}
753
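/*
 * Check the fault status register after a descriptor has been submitted.
 * Returns -EAGAIN if a time-out (ITE) aborted the pending wait descriptors
 * (the caller restarts the submission), -EINVAL for an invalidation queue
 * error (IQE) on this descriptor, and 0 otherwise.
 */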
704126ad
YZ
754static int qi_check_fault(struct intel_iommu *iommu, int index)
755{
756 u32 fault;
6ba6c3a4 757 int head, tail;
704126ad
YZ
758 struct q_inval *qi = iommu->qi;
759 int wait_index = (index + 1) % QI_LENGTH;
760
6ba6c3a4
YZ
761 if (qi->desc_status[wait_index] == QI_ABORT)
762 return -EAGAIN;
763
704126ad
YZ
764 fault = readl(iommu->reg + DMAR_FSTS_REG);
765
766 /*
767 * If IQE happens, the head points to the descriptor associated
768 * with the error. No new descriptors are fetched until the IQE
769 * is cleared.
770 */
771 if (fault & DMA_FSTS_IQE) {
772 head = readl(iommu->reg + DMAR_IQH_REG);
6ba6c3a4 773 if ((head >> DMAR_IQ_SHIFT) == index) {
bf947fcb 774 pr_err("VT-d detected invalid descriptor: "
6ba6c3a4
YZ
775 "low=%llx, high=%llx\n",
776 (unsigned long long)qi->desc[index].low,
777 (unsigned long long)qi->desc[index].high);
704126ad
YZ
778 memcpy(&qi->desc[index], &qi->desc[wait_index],
779 sizeof(struct qi_desc));
780 __iommu_flush_cache(iommu, &qi->desc[index],
781 sizeof(struct qi_desc));
782 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
783 return -EINVAL;
784 }
785 }
786
6ba6c3a4
YZ
787 /*
788 * If ITE happens, all pending wait_desc commands are aborted.
789 * No new descriptors are fetched until the ITE is cleared.
790 */
791 if (fault & DMA_FSTS_ITE) {
792 head = readl(iommu->reg + DMAR_IQH_REG);
793 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
794 head |= 1;
795 tail = readl(iommu->reg + DMAR_IQT_REG);
796 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
797
798 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
799
800 do {
801 if (qi->desc_status[head] == QI_IN_USE)
802 qi->desc_status[head] = QI_ABORT;
803 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
804 } while (head != tail);
805
806 if (qi->desc_status[wait_index] == QI_ABORT)
807 return -EAGAIN;
808 }
809
810 if (fault & DMA_FSTS_ICE)
811 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
812
704126ad
YZ
813 return 0;
814}
815
fe962e90
SS
816/*
817 * Submit the queued invalidation descriptor to the remapping
818 * hardware unit and wait for its completion.
819 */
704126ad 820int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
fe962e90 821{
6ba6c3a4 822 int rc;
fe962e90
SS
823 struct q_inval *qi = iommu->qi;
824 struct qi_desc *hw, wait_desc;
825 int wait_index, index;
826 unsigned long flags;
827
828 if (!qi)
704126ad 829 return 0;
fe962e90
SS
830
831 hw = qi->desc;
832
6ba6c3a4
YZ
833restart:
834 rc = 0;
835
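 /*
 * Each submission consumes two slots (the request and its wait
 * descriptor); waiting for at least three free slots keeps one slot
 * spare so the ring never fills completely.
 */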
3b8f4048 836 raw_spin_lock_irqsave(&qi->q_lock, flags);
fe962e90 837 while (qi->free_cnt < 3) {
3b8f4048 838 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
fe962e90 839 cpu_relax();
3b8f4048 840 raw_spin_lock_irqsave(&qi->q_lock, flags);
fe962e90
SS
841 }
842
843 index = qi->free_head;
844 wait_index = (index + 1) % QI_LENGTH;
845
846 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
847
848 hw[index] = *desc;
849
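 /*
 * A status-write wait descriptor follows the request; hardware sets
 * desc_status[wait_index] to QI_DONE once everything up to and including
 * the request has been processed, which is what the poll loop below
 * waits for.
 */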
704126ad
YZ
850 wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
851 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
fe962e90
SS
852 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
853
854 hw[wait_index] = wait_desc;
855
856 __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
857 __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
858
859 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
860 qi->free_cnt -= 2;
861
fe962e90
SS
862 /*
863 * update the HW tail register indicating the presence of
864 * new descriptors.
865 */
6ba6c3a4 866 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
fe962e90
SS
867
868 while (qi->desc_status[wait_index] != QI_DONE) {
f05810c9
SS
869 /*
870 * We will leave interrupts disabled, to prevent the interrupt
871 * context from queueing another cmd while a cmd is already submitted
872 * and waiting for completion on this cpu. This is to avoid
873 * a deadlock where the interrupt context can wait indefinitely
874 * for free slots in the queue.
875 */
704126ad
YZ
876 rc = qi_check_fault(iommu, index);
877 if (rc)
6ba6c3a4 878 break;
704126ad 879
3b8f4048 880 raw_spin_unlock(&qi->q_lock);
fe962e90 881 cpu_relax();
3b8f4048 882 raw_spin_lock(&qi->q_lock);
fe962e90 883 }
6ba6c3a4
YZ
884
885 qi->desc_status[index] = QI_DONE;
fe962e90
SS
886
887 reclaim_free_desc(qi);
3b8f4048 888 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
704126ad 889
6ba6c3a4
YZ
890 if (rc == -EAGAIN)
891 goto restart;
892
704126ad 893 return rc;
fe962e90
SS
894}
895
896/*
897 * Flush the global interrupt entry cache.
898 */
899void qi_global_iec(struct intel_iommu *iommu)
900{
901 struct qi_desc desc;
902
903 desc.low = QI_IEC_TYPE;
904 desc.high = 0;
905
704126ad 906 /* should never fail */
fe962e90
SS
907 qi_submit_sync(&desc, iommu);
908}
909
4c25a2c1
DW
910void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
911 u64 type)
3481f210 912{
3481f210
YS
913 struct qi_desc desc;
914
3481f210
YS
915 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
916 | QI_CC_GRAN(type) | QI_CC_TYPE;
917 desc.high = 0;
918
4c25a2c1 919 qi_submit_sync(&desc, iommu);
3481f210
YS
920}
921
1f0ef2aa
DW
922void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
923 unsigned int size_order, u64 type)
3481f210
YS
924{
925 u8 dw = 0, dr = 0;
926
927 struct qi_desc desc;
928 int ih = 0;
929
3481f210
YS
930 if (cap_write_drain(iommu->cap))
931 dw = 1;
932
933 if (cap_read_drain(iommu->cap))
934 dr = 1;
935
936 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
937 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
938 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
939 | QI_IOTLB_AM(size_order);
940
1f0ef2aa 941 qi_submit_sync(&desc, iommu);
3481f210
YS
942}
943
6ba6c3a4
YZ
944void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
945 u64 addr, unsigned mask)
946{
947 struct qi_desc desc;
948
949 if (mask) {
950 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
951 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
952 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
953 } else
954 desc.high = QI_DEV_IOTLB_ADDR(addr);
955
956 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
957 qdep = 0;
958
959 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
960 QI_DIOTLB_TYPE;
961
962 qi_submit_sync(&desc, iommu);
963}
964
eba67e5d
SS
965/*
966 * Disable Queued Invalidation interface.
967 */
968void dmar_disable_qi(struct intel_iommu *iommu)
969{
970 unsigned long flags;
971 u32 sts;
972 cycles_t start_time = get_cycles();
973
974 if (!ecap_qis(iommu->ecap))
975 return;
976
1f5b3c3f 977 raw_spin_lock_irqsave(&iommu->register_lock, flags);
eba67e5d
SS
978
979 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
980 if (!(sts & DMA_GSTS_QIES))
981 goto end;
982
983 /*
984 * Give a chance to HW to complete the pending invalidation requests.
985 */
986 while ((readl(iommu->reg + DMAR_IQT_REG) !=
987 readl(iommu->reg + DMAR_IQH_REG)) &&
988 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
989 cpu_relax();
990
991 iommu->gcmd &= ~DMA_GCMD_QIE;
eba67e5d
SS
992 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
993
994 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
995 !(sts & DMA_GSTS_QIES), sts);
996end:
1f5b3c3f 997 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
eba67e5d
SS
998}
999
eb4a52bc
FY
1000/*
1001 * Enable queued invalidation.
1002 */
1003static void __dmar_enable_qi(struct intel_iommu *iommu)
1004{
c416daa9 1005 u32 sts;
eb4a52bc
FY
1006 unsigned long flags;
1007 struct q_inval *qi = iommu->qi;
1008
1009 qi->free_head = qi->free_tail = 0;
1010 qi->free_cnt = QI_LENGTH;
1011
1f5b3c3f 1012 raw_spin_lock_irqsave(&iommu->register_lock, flags);
eb4a52bc
FY
1013
1014 /* write zero to the tail reg */
1015 writel(0, iommu->reg + DMAR_IQT_REG);
1016
1017 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1018
eb4a52bc 1019 iommu->gcmd |= DMA_GCMD_QIE;
c416daa9 1020 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
eb4a52bc
FY
1021
1022 /* Make sure hardware complete it */
1023 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1024
1f5b3c3f 1025 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
eb4a52bc
FY
1026}
1027
fe962e90
SS
1028/*
1029 * Enable Queued Invalidation interface. This is a must to support
1030 * interrupt-remapping. Also used by DMA-remapping, which replaces
1031 * register based IOTLB invalidation.
1032 */
1033int dmar_enable_qi(struct intel_iommu *iommu)
1034{
fe962e90 1035 struct q_inval *qi;
751cafe3 1036 struct page *desc_page;
fe962e90
SS
1037
1038 if (!ecap_qis(iommu->ecap))
1039 return -ENOENT;
1040
1041 /*
1042 * queued invalidation is already setup and enabled.
1043 */
1044 if (iommu->qi)
1045 return 0;
1046
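 /*
 * The invalidation queue itself is a single zeroed page (QI_LENGTH
 * descriptor slots) allocated on the IOMMU's NUMA node; desc_status
 * shadows the completion state of each slot.
 */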
fa4b57cc 1047 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
fe962e90
SS
1048 if (!iommu->qi)
1049 return -ENOMEM;
1050
1051 qi = iommu->qi;
1052
751cafe3
SS
1053
1054 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1055 if (!desc_page) {
fe962e90
SS
1056 kfree(qi);
1057 iommu->qi = NULL;
1058 return -ENOMEM;
1059 }
1060
751cafe3
SS
1061 qi->desc = page_address(desc_page);
1062
37a40710 1063 qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
fe962e90
SS
1064 if (!qi->desc_status) {
1065 free_page((unsigned long) qi->desc);
1066 kfree(qi);
1067 iommu->qi = NULL;
1068 return -ENOMEM;
1069 }
1070
1071 qi->free_head = qi->free_tail = 0;
1072 qi->free_cnt = QI_LENGTH;
1073
3b8f4048 1074 raw_spin_lock_init(&qi->q_lock);
fe962e90 1075
eb4a52bc 1076 __dmar_enable_qi(iommu);
fe962e90
SS
1077
1078 return 0;
1079}
0ac2491f
SS
1080
1081/* iommu interrupt handling. Most stuff are MSI-like. */
1082
9d783ba0
SS
1083enum faulttype {
1084 DMA_REMAP,
1085 INTR_REMAP,
1086 UNKNOWN,
1087};
1088
1089static const char *dma_remap_fault_reasons[] =
0ac2491f
SS
1090{
1091 "Software",
1092 "Present bit in root entry is clear",
1093 "Present bit in context entry is clear",
1094 "Invalid context entry",
1095 "Access beyond MGAW",
1096 "PTE Write access is not set",
1097 "PTE Read access is not set",
1098 "Next page table ptr is invalid",
1099 "Root table address invalid",
1100 "Context table ptr is invalid",
1101 "non-zero reserved fields in RTP",
1102 "non-zero reserved fields in CTP",
1103 "non-zero reserved fields in PTE",
4ecccd9e 1104 "PCE for translation request specifies blocking",
0ac2491f 1105};
9d783ba0 1106
95a02e97 1107static const char *irq_remap_fault_reasons[] =
9d783ba0
SS
1108{
1109 "Detected reserved fields in the decoded interrupt-remapped request",
1110 "Interrupt index exceeded the interrupt-remapping table size",
1111 "Present field in the IRTE entry is clear",
1112 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1113 "Detected reserved fields in the IRTE entry",
1114 "Blocked a compatibility format interrupt request",
1115 "Blocked an interrupt request due to source-id verification failure",
1116};
1117
21004dcd 1118static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
0ac2491f 1119{
fefe1ed1
DC
1120 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1121 ARRAY_SIZE(irq_remap_fault_reasons))) {
9d783ba0 1122 *fault_type = INTR_REMAP;
95a02e97 1123 return irq_remap_fault_reasons[fault_reason - 0x20];
9d783ba0
SS
1124 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1125 *fault_type = DMA_REMAP;
1126 return dma_remap_fault_reasons[fault_reason];
1127 } else {
1128 *fault_type = UNKNOWN;
0ac2491f 1129 return "Unknown";
9d783ba0 1130 }
0ac2491f
SS
1131}
1132
5c2837fb 1133void dmar_msi_unmask(struct irq_data *data)
0ac2491f 1134{
dced35ae 1135 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
0ac2491f
SS
1136 unsigned long flag;
1137
1138 /* unmask it */
1f5b3c3f 1139 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f
SS
1140 writel(0, iommu->reg + DMAR_FECTL_REG);
1141 /* Read a reg to force flush the post write */
1142 readl(iommu->reg + DMAR_FECTL_REG);
1f5b3c3f 1143 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1144}
1145
5c2837fb 1146void dmar_msi_mask(struct irq_data *data)
0ac2491f
SS
1147{
1148 unsigned long flag;
dced35ae 1149 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
0ac2491f
SS
1150
1151 /* mask it */
1f5b3c3f 1152 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f
SS
1153 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1154 /* Read a reg to force flush the post write */
1155 readl(iommu->reg + DMAR_FECTL_REG);
1f5b3c3f 1156 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1157}
1158
1159void dmar_msi_write(int irq, struct msi_msg *msg)
1160{
dced35ae 1161 struct intel_iommu *iommu = irq_get_handler_data(irq);
0ac2491f
SS
1162 unsigned long flag;
1163
1f5b3c3f 1164 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f
SS
1165 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1166 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1167 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1f5b3c3f 1168 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1169}
1170
1171void dmar_msi_read(int irq, struct msi_msg *msg)
1172{
dced35ae 1173 struct intel_iommu *iommu = irq_get_handler_data(irq);
0ac2491f
SS
1174 unsigned long flag;
1175
1f5b3c3f 1176 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f
SS
1177 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1178 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1179 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1f5b3c3f 1180 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1181}
1182
1183static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1184 u8 fault_reason, u16 source_id, unsigned long long addr)
1185{
1186 const char *reason;
9d783ba0 1187 int fault_type;
0ac2491f 1188
9d783ba0 1189 reason = dmar_get_fault_reason(fault_reason, &fault_type);
0ac2491f 1190
9d783ba0 1191 if (fault_type == INTR_REMAP)
bf947fcb 1192 pr_err("INTR-REMAP: Request device [[%02x:%02x.%d] "
9d783ba0
SS
1193 "fault index %llx\n"
1194 "INTR-REMAP:[fault reason %02d] %s\n",
1195 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1196 PCI_FUNC(source_id & 0xFF), addr >> 48,
1197 fault_reason, reason);
1198 else
bf947fcb 1199 pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
9d783ba0
SS
1200 "fault addr %llx \n"
1201 "DMAR:[fault reason %02d] %s\n",
1202 (type ? "DMA Read" : "DMA Write"),
1203 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1204 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
0ac2491f
SS
1205 return 0;
1206}
1207
1208#define PRIMARY_FAULT_REG_LEN (16)
1531a6a6 1209irqreturn_t dmar_fault(int irq, void *dev_id)
0ac2491f
SS
1210{
1211 struct intel_iommu *iommu = dev_id;
1212 int reg, fault_index;
1213 u32 fault_status;
1214 unsigned long flag;
1215
1f5b3c3f 1216 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f 1217 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
9d783ba0 1218 if (fault_status)
bf947fcb 1219 pr_err("DRHD: handling fault status reg %x\n", fault_status);
0ac2491f
SS
1220
1221 /* TBD: ignore advanced fault log currently */
1222 if (!(fault_status & DMA_FSTS_PPF))
bd5cdad0 1223 goto unlock_exit;
0ac2491f
SS
1224
1225 fault_index = dma_fsts_fault_record_index(fault_status);
1226 reg = cap_fault_reg_offset(iommu->cap);
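 /*
 * Each primary fault record is PRIMARY_FAULT_REG_LEN (16) bytes: the
 * 64-bit word at offset 0 holds the faulting address, the dword at
 * offset 8 the source-id, and the dword at offset 12 the valid bit,
 * fault reason and request type. Scan and clear records until one
 * without DMA_FRCD_F set is found.
 */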
1227 while (1) {
1228 u8 fault_reason;
1229 u16 source_id;
1230 u64 guest_addr;
1231 int type;
1232 u32 data;
1233
1234 /* highest 32 bits */
1235 data = readl(iommu->reg + reg +
1236 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1237 if (!(data & DMA_FRCD_F))
1238 break;
1239
1240 fault_reason = dma_frcd_fault_reason(data);
1241 type = dma_frcd_type(data);
1242
1243 data = readl(iommu->reg + reg +
1244 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1245 source_id = dma_frcd_source_id(data);
1246
1247 guest_addr = dmar_readq(iommu->reg + reg +
1248 fault_index * PRIMARY_FAULT_REG_LEN);
1249 guest_addr = dma_frcd_page_addr(guest_addr);
1250 /* clear the fault */
1251 writel(DMA_FRCD_F, iommu->reg + reg +
1252 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1253
1f5b3c3f 1254 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1255
1256 dmar_fault_do_one(iommu, type, fault_reason,
1257 source_id, guest_addr);
1258
1259 fault_index++;
8211a7b5 1260 if (fault_index >= cap_num_fault_regs(iommu->cap))
0ac2491f 1261 fault_index = 0;
1f5b3c3f 1262 raw_spin_lock_irqsave(&iommu->register_lock, flag);
0ac2491f 1263 }
0ac2491f 1264
bd5cdad0
LZH
1265 writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1266
1267unlock_exit:
1f5b3c3f 1268 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f
SS
1269 return IRQ_HANDLED;
1270}
1271
1272int dmar_set_interrupt(struct intel_iommu *iommu)
1273{
1274 int irq, ret;
1275
9d783ba0
SS
1276 /*
1277 * Check if the fault interrupt is already initialized.
1278 */
1279 if (iommu->irq)
1280 return 0;
1281
0ac2491f
SS
1282 irq = create_irq();
1283 if (!irq) {
bf947fcb 1284 pr_err("IOMMU: no free vectors\n");
0ac2491f
SS
1285 return -EINVAL;
1286 }
1287
dced35ae 1288 irq_set_handler_data(irq, iommu);
0ac2491f
SS
1289 iommu->irq = irq;
1290
1291 ret = arch_setup_dmar_msi(irq);
1292 if (ret) {
dced35ae 1293 irq_set_handler_data(irq, NULL);
0ac2491f
SS
1294 iommu->irq = 0;
1295 destroy_irq(irq);
dd726435 1296 return ret;
0ac2491f
SS
1297 }
1298
477694e7 1299 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
0ac2491f 1300 if (ret)
bf947fcb 1301 pr_err("IOMMU: can't request irq\n");
0ac2491f
SS
1302 return ret;
1303}
9d783ba0
SS
1304
1305int __init enable_drhd_fault_handling(void)
1306{
1307 struct dmar_drhd_unit *drhd;
7c919779 1308 struct intel_iommu *iommu;
9d783ba0
SS
1309
1310 /*
1311 * Enable fault control interrupt.
1312 */
7c919779 1313 for_each_iommu(iommu, drhd) {
bd5cdad0 1314 u32 fault_status;
7c919779 1315 int ret = dmar_set_interrupt(iommu);
9d783ba0
SS
1316
1317 if (ret) {
e9071b0b 1318 pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
9d783ba0
SS
1319 (unsigned long long)drhd->reg_base_addr, ret);
1320 return -1;
1321 }
7f99d946
SS
1322
1323 /*
1324 * Clear any previous faults.
1325 */
1326 dmar_fault(iommu->irq, iommu);
bd5cdad0
LZH
1327 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1328 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
9d783ba0
SS
1329 }
1330
1331 return 0;
1332}
eb4a52bc
FY
1333
1334/*
1335 * Re-enable Queued Invalidation interface.
1336 */
1337int dmar_reenable_qi(struct intel_iommu *iommu)
1338{
1339 if (!ecap_qis(iommu->ecap))
1340 return -ENOENT;
1341
1342 if (!iommu->qi)
1343 return -ENOENT;
1344
1345 /*
1346 * First disable queued invalidation.
1347 */
1348 dmar_disable_qi(iommu);
1349 /*
1350 * Then enable queued invalidation again. Since there is no pending
1351 * invalidation requests now, it's safe to re-enable queued
1352 * invalidation.
1353 */
1354 __dmar_enable_qi(iommu);
1355
1356 return 0;
1357}
074835f0
YS
1358
1359/*
1360 * Check interrupt remapping support in DMAR table description.
1361 */
0b8973a8 1362int __init dmar_ir_support(void)
074835f0
YS
1363{
1364 struct acpi_table_dmar *dmar;
1365 dmar = (struct acpi_table_dmar *)dmar_tbl;
4f506e07
AP
1366 if (!dmar)
1367 return 0;
074835f0
YS
1368 return dmar->flags & 0x1;
1369}
694835dc 1370
4db77ff3 1371IOMMU_INIT_POST(detect_intel_iommu);