iommu/vt-d: release invalidation queue when destroying IOMMU unit
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/* No locks are needed as DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scanning the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

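	/*
	 * Walk the ACPI device path: each entry selects a (device, function)
	 * on the current bus, and bridges descend to their subordinate bus.
	 */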
	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			pr_warn("Device scope bus [%d] not found\n", scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
		if (!pdev) {
			/* warning will be printed below */
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->device, path->function);
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
	    pdev->subordinate) || (scope->entry_type == \
	    ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		pci_dev_put(pdev);
		pr_warn("Device scope type does not match for %s\n",
			pci_name(pdev));
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				dmar_free_dev_scope(devices, cnt);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
{
	if (*devices && *cnt) {
		while (--*cnt >= 0)
			pci_dev_put((*devices)[*cnt]);
		kfree(*devices);
		*devices = NULL;
		*cnt = 0;
	}
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	return dmar_parse_dev_scope((void *)(drhd + 1),
				    ((void *)drhd) + drhd->header.length,
				    &dmaru->devices_cnt, &dmaru->devices,
				    drhd->segment);
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		drhd->reg_base_addr,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have been done
	 * with a fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy that SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}

static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry(drhd, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}


int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			pr_info("parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		pr_info("No DMAR devices found\n");
		return -ENODEV;
	}

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk("IOMMU: can't validate: %llx\n", drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}

#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
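
/*
 * Note: map_iommu()/unmap_iommu() bracket the IOMMU register window:
 * alloc_iommu() below maps it, and free_iommu() (or the alloc_iommu()
 * error path) unmaps it again.
 */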

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
			iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
			iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	unmap_iommu(iommu);
 error:
	kfree(iommu);
	return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
	if (iommu->irq) {
		free_irq(iommu->irq, iommu);
		irq_set_handler_data(iommu->irq, NULL);
		destroy_irq(iommu->irq);
	}

741 free_page((unsigned long)iommu->qi->desc);
742 kfree(iommu->qi->desc_status);
743 kfree(iommu->qi);
744 }
745
e61d98d8 746 if (iommu->reg)
6f5cf521
DD
747 unmap_iommu(iommu);
748
e61d98d8
SS
749 kfree(iommu);
750}
fe962e90
SS
751
752/*
753 * Reclaim all the submitted descriptors which have completed its work.
754 */
755static inline void reclaim_free_desc(struct q_inval *qi)
756{
6ba6c3a4
YZ
757 while (qi->desc_status[qi->free_tail] == QI_DONE ||
758 qi->desc_status[qi->free_tail] == QI_ABORT) {
fe962e90
SS
759 qi->desc_status[qi->free_tail] = QI_FREE;
760 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
761 qi->free_cnt++;
762 }
763}
764
704126ad
YZ
765static int qi_check_fault(struct intel_iommu *iommu, int index)
766{
767 u32 fault;
6ba6c3a4 768 int head, tail;
704126ad
YZ
769 struct q_inval *qi = iommu->qi;
770 int wait_index = (index + 1) % QI_LENGTH;
771
6ba6c3a4
YZ
772 if (qi->desc_status[wait_index] == QI_ABORT)
773 return -EAGAIN;
774
704126ad
YZ
775 fault = readl(iommu->reg + DMAR_FSTS_REG);
776
777 /*
778 * If IQE happens, the head points to the descriptor associated
779 * with the error. No new descriptors are fetched until the IQE
780 * is cleared.
781 */
782 if (fault & DMA_FSTS_IQE) {
783 head = readl(iommu->reg + DMAR_IQH_REG);
6ba6c3a4 784 if ((head >> DMAR_IQ_SHIFT) == index) {
bf947fcb 785 pr_err("VT-d detected invalid descriptor: "
6ba6c3a4
YZ
786 "low=%llx, high=%llx\n",
787 (unsigned long long)qi->desc[index].low,
788 (unsigned long long)qi->desc[index].high);
704126ad
YZ
789 memcpy(&qi->desc[index], &qi->desc[wait_index],
790 sizeof(struct qi_desc));
791 __iommu_flush_cache(iommu, &qi->desc[index],
792 sizeof(struct qi_desc));
793 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
794 return -EINVAL;
795 }
796 }
797
6ba6c3a4
YZ
798 /*
799 * If ITE happens, all pending wait_desc commands are aborted.
800 * No new descriptors are fetched until the ITE is cleared.
801 */
802 if (fault & DMA_FSTS_ITE) {
803 head = readl(iommu->reg + DMAR_IQH_REG);
804 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
805 head |= 1;
806 tail = readl(iommu->reg + DMAR_IQT_REG);
807 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
808
809 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
810
811 do {
812 if (qi->desc_status[head] == QI_IN_USE)
813 qi->desc_status[head] = QI_ABORT;
814 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
815 } while (head != tail);
816
817 if (qi->desc_status[wait_index] == QI_ABORT)
818 return -EAGAIN;
819 }
820
821 if (fault & DMA_FSTS_ICE)
822 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
823
704126ad
YZ
824 return 0;
825}
826
fe962e90
SS
827/*
828 * Submit the queued invalidation descriptor to the remapping
829 * hardware unit and wait for its completion.
830 */
704126ad 831int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
fe962e90 832{
6ba6c3a4 833 int rc;
fe962e90
SS
834 struct q_inval *qi = iommu->qi;
835 struct qi_desc *hw, wait_desc;
836 int wait_index, index;
837 unsigned long flags;
838
839 if (!qi)
704126ad 840 return 0;
fe962e90
SS
841
842 hw = qi->desc;
843
6ba6c3a4
YZ
844restart:
845 rc = 0;
846
3b8f4048 847 raw_spin_lock_irqsave(&qi->q_lock, flags);
fe962e90 848 while (qi->free_cnt < 3) {
3b8f4048 849 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
fe962e90 850 cpu_relax();
3b8f4048 851 raw_spin_lock_irqsave(&qi->q_lock, flags);
fe962e90
SS
852 }
853
854 index = qi->free_head;
855 wait_index = (index + 1) % QI_LENGTH;
856
857 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
858
859 hw[index] = *desc;
860
704126ad
YZ
861 wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
862 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
fe962e90
SS
863 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
864
865 hw[wait_index] = wait_desc;
866
867 __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
868 __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
869
870 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
871 qi->free_cnt -= 2;
872
fe962e90
SS
873 /*
874 * update the HW tail register indicating the presence of
875 * new descriptors.
876 */
6ba6c3a4 877 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
fe962e90
SS
878
879 while (qi->desc_status[wait_index] != QI_DONE) {
f05810c9
SS
880 /*
881 * We will leave the interrupts disabled, to prevent interrupt
882 * context to queue another cmd while a cmd is already submitted
883 * and waiting for completion on this cpu. This is to avoid
884 * a deadlock where the interrupt context can wait indefinitely
885 * for free slots in the queue.
886 */
704126ad
YZ
887 rc = qi_check_fault(iommu, index);
888 if (rc)
6ba6c3a4 889 break;
704126ad 890
3b8f4048 891 raw_spin_unlock(&qi->q_lock);
fe962e90 892 cpu_relax();
3b8f4048 893 raw_spin_lock(&qi->q_lock);
fe962e90 894 }
6ba6c3a4
YZ
895
896 qi->desc_status[index] = QI_DONE;
fe962e90
SS
897
898 reclaim_free_desc(qi);
3b8f4048 899 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
704126ad 900
6ba6c3a4
YZ
901 if (rc == -EAGAIN)
902 goto restart;
903
704126ad 904 return rc;
fe962e90
SS
905}
906
907/*
908 * Flush the global interrupt entry cache.
909 */
910void qi_global_iec(struct intel_iommu *iommu)
911{
912 struct qi_desc desc;
913
914 desc.low = QI_IEC_TYPE;
915 desc.high = 0;
916
704126ad 917 /* should never fail */
fe962e90
SS
918 qi_submit_sync(&desc, iommu);
919}
920
4c25a2c1
DW
921void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
922 u64 type)
3481f210 923{
3481f210
YS
924 struct qi_desc desc;
925
3481f210
YS
926 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
927 | QI_CC_GRAN(type) | QI_CC_TYPE;
928 desc.high = 0;
929
4c25a2c1 930 qi_submit_sync(&desc, iommu);
3481f210
YS
931}
932
1f0ef2aa
DW
933void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
934 unsigned int size_order, u64 type)
3481f210
YS
935{
936 u8 dw = 0, dr = 0;
937
938 struct qi_desc desc;
939 int ih = 0;
940
3481f210
YS
941 if (cap_write_drain(iommu->cap))
942 dw = 1;
943
944 if (cap_read_drain(iommu->cap))
945 dr = 1;
946
947 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
948 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
949 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
950 | QI_IOTLB_AM(size_order);
951
1f0ef2aa 952 qi_submit_sync(&desc, iommu);
3481f210
YS
953}
954
6ba6c3a4
YZ
955void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
956 u64 addr, unsigned mask)
957{
958 struct qi_desc desc;
959
960 if (mask) {
961 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
962 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
963 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
964 } else
965 desc.high = QI_DEV_IOTLB_ADDR(addr);
966
967 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
968 qdep = 0;
969
970 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
971 QI_DIOTLB_TYPE;
972
973 qi_submit_sync(&desc, iommu);
974}
975
eba67e5d
SS
976/*
977 * Disable Queued Invalidation interface.
978 */
979void dmar_disable_qi(struct intel_iommu *iommu)
980{
981 unsigned long flags;
982 u32 sts;
983 cycles_t start_time = get_cycles();
984
985 if (!ecap_qis(iommu->ecap))
986 return;
987
1f5b3c3f 988 raw_spin_lock_irqsave(&iommu->register_lock, flags);
eba67e5d
SS
989
990 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
991 if (!(sts & DMA_GSTS_QIES))
992 goto end;
993
994 /*
995 * Give a chance to HW to complete the pending invalidation requests.
996 */
997 while ((readl(iommu->reg + DMAR_IQT_REG) !=
998 readl(iommu->reg + DMAR_IQH_REG)) &&
999 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1000 cpu_relax();
1001
1002 iommu->gcmd &= ~DMA_GCMD_QIE;
eba67e5d
SS
1003 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1004
1005 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1006 !(sts & DMA_GSTS_QIES), sts);
1007end:
1f5b3c3f 1008 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
eba67e5d
SS
1009}
1010
eb4a52bc
FY
1011/*
1012 * Enable queued invalidation.
1013 */
1014static void __dmar_enable_qi(struct intel_iommu *iommu)
1015{
c416daa9 1016 u32 sts;
eb4a52bc
FY
1017 unsigned long flags;
1018 struct q_inval *qi = iommu->qi;
1019
1020 qi->free_head = qi->free_tail = 0;
1021 qi->free_cnt = QI_LENGTH;
1022
1f5b3c3f 1023 raw_spin_lock_irqsave(&iommu->register_lock, flags);
eb4a52bc
FY
1024
1025 /* write zero to the tail reg */
1026 writel(0, iommu->reg + DMAR_IQT_REG);
1027
1028 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1029
eb4a52bc 1030 iommu->gcmd |= DMA_GCMD_QIE;
c416daa9 1031 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
eb4a52bc
FY
1032
1033 /* Make sure hardware complete it */
1034 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1035
1f5b3c3f 1036 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
eb4a52bc
FY
1037}
1038
fe962e90
SS
1039/*
1040 * Enable Queued Invalidation interface. This is a must to support
1041 * interrupt-remapping. Also used by DMA-remapping, which replaces
1042 * register based IOTLB invalidation.
1043 */
1044int dmar_enable_qi(struct intel_iommu *iommu)
1045{
fe962e90 1046 struct q_inval *qi;
751cafe3 1047 struct page *desc_page;
fe962e90
SS
1048
1049 if (!ecap_qis(iommu->ecap))
1050 return -ENOENT;
1051
1052 /*
1053 * queued invalidation is already setup and enabled.
1054 */
1055 if (iommu->qi)
1056 return 0;
1057
fa4b57cc 1058 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
fe962e90
SS
1059 if (!iommu->qi)
1060 return -ENOMEM;
1061
1062 qi = iommu->qi;
1063
751cafe3
SS
1064
1065 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1066 if (!desc_page) {
fe962e90
SS
1067 kfree(qi);
1068 iommu->qi = 0;
1069 return -ENOMEM;
1070 }
1071
751cafe3
SS
1072 qi->desc = page_address(desc_page);
1073
37a40710 1074 qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
fe962e90
SS
1075 if (!qi->desc_status) {
1076 free_page((unsigned long) qi->desc);
1077 kfree(qi);
1078 iommu->qi = 0;
1079 return -ENOMEM;
1080 }
1081
1082 qi->free_head = qi->free_tail = 0;
1083 qi->free_cnt = QI_LENGTH;
1084
3b8f4048 1085 raw_spin_lock_init(&qi->q_lock);
fe962e90 1086
eb4a52bc 1087 __dmar_enable_qi(iommu);
fe962e90
SS
1088
1089 return 0;
1090}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
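	/*
	 * Each primary fault record is 16 bytes: the high dword (offset 12)
	 * carries the fault bit, reason and type, offset 8 the source-id,
	 * and the first qword the faulting address.
	 */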
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
1280 return IRQ_HANDLED;
1281}
1282
1283int dmar_set_interrupt(struct intel_iommu *iommu)
1284{
1285 int irq, ret;
1286
9d783ba0
SS
1287 /*
1288 * Check if the fault interrupt is already initialized.
1289 */
1290 if (iommu->irq)
1291 return 0;
1292
0ac2491f
SS
1293 irq = create_irq();
1294 if (!irq) {
bf947fcb 1295 pr_err("IOMMU: no free vectors\n");
0ac2491f
SS
1296 return -EINVAL;
1297 }
1298
dced35ae 1299 irq_set_handler_data(irq, iommu);
0ac2491f
SS
1300 iommu->irq = irq;
1301
1302 ret = arch_setup_dmar_msi(irq);
1303 if (ret) {
dced35ae 1304 irq_set_handler_data(irq, NULL);
0ac2491f
SS
1305 iommu->irq = 0;
1306 destroy_irq(irq);
dd726435 1307 return ret;
0ac2491f
SS
1308 }
1309
477694e7 1310 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
0ac2491f 1311 if (ret)
bf947fcb 1312 pr_err("IOMMU: can't request irq\n");
0ac2491f
SS
1313 return ret;
1314}
9d783ba0
SS
1315
1316int __init enable_drhd_fault_handling(void)
1317{
1318 struct dmar_drhd_unit *drhd;
7c919779 1319 struct intel_iommu *iommu;
9d783ba0
SS
1320
1321 /*
1322 * Enable fault control interrupt.
1323 */
7c919779 1324 for_each_iommu(iommu, drhd) {
bd5cdad0 1325 u32 fault_status;
7c919779 1326 int ret = dmar_set_interrupt(iommu);
9d783ba0
SS
1327
1328 if (ret) {
e9071b0b 1329 pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
9d783ba0
SS
1330 (unsigned long long)drhd->reg_base_addr, ret);
1331 return -1;
1332 }
7f99d946
SS
1333
1334 /*
1335 * Clear any previous faults.
1336 */
1337 dmar_fault(iommu->irq, iommu);
bd5cdad0
LZH
1338 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1339 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
9d783ba0
SS
1340 }
1341
1342 return 0;
1343}
eb4a52bc
FY

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
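	/* Bit 0 of the DMAR table flags indicates interrupt remapping support. */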
	return dmar->flags & 0x1;
}

static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	/* DMAR units are in use */
	if (irq_remapping_enabled || intel_iommu_enabled)
		return 0;

	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);