Revert "mm: correctly synchronize rss-counters at exit/exec"
[deliverable/linux.git] / drivers / iommu / dmar.c
CommitLineData
10e5247f
KA
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#define PREFIX "DMAR: "

/*
 * No locks are needed, as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add the INCLUDE_ALL unit at the tail, so that a scan of the
	 * list will find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list nonexistent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
			       PREFIX "Device scope bus [%d] not found\n",
			       scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
			       segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
		       segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		/* Print the name before dropping the reference. */
		printk(KERN_WARNING PREFIX
		       "Device scope type does not match for %s\n",
		       pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
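
/*
 * Illustrative example: a device scope entry is a variable-length ACPI
 * structure -- a small header followed by (device, function) path
 * elements that are walked bridge by bridge.  A hypothetical entry with
 * start bus 0 and path {(0x1c, 0), (0x00, 0)} would resolve as: find
 * bus 0, take bridge 00:1c.0, descend to its secondary bus, and return
 * device 0, function 0 on that bus as the endpoint.
 */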

int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}
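
/*
 * Informational: DRHD parsing is two-phase.  dmar_parse_one_drhd() runs
 * while the DMAR table is being parsed, early in boot, and only records
 * the hardware unit; the PCI device scopes it references are resolved
 * later from dmar_dev_scope_init(), via dmar_parse_dev() below, once
 * PCI enumeration is available.
 */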

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	/* drhd is stale past the loop; report the address from the RHSA. */
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices. */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again here: the earlier dmar_tbl mapping may have been
	 * done with a fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
	 * SINIT saved in SinitMleData in the TXT heap (which is DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
			       "Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
			       "Unknown DMAR structure type %d\n",
			       entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}
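
/*
 * Informational: the DMAR table is a fixed ACPI header (struct
 * acpi_table_dmar, carrying the host address width and flags) followed
 * by a packed sequence of variable-length remapping structures, each
 * beginning with a struct acpi_dmar_header {type, length}.  The parser
 * above simply hops header->length bytes at a time until it reaches
 * dmar_tbl->length.
 */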

static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}
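
/*
 * Informational: dmar_register_drhd_unit() keeps the INCLUDE_ALL unit at
 * the tail of dmar_drhd_units, so the walk above always tries units with
 * explicit device scopes before falling back to the catch-all unit for
 * the device's PCI segment.
 */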

int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}


int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
			       "Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk(KERN_ERR "IOMMU: can't validate: %llx\n",
				       drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
		struct acpi_table_dmar *dmar;

		dmar = (struct acpi_table_dmar *)dmar_tbl;

		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
		    dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}

#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}


int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
		goto err_unmap;
	}

	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	iounmap(iommu->reg);
 error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
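
/*
 * Informational: qi_submit_sync() always queues descriptors in pairs --
 * the work descriptor at an even slot, its wait descriptor at the odd
 * slot that follows.  That is why the ITE recovery above rounds head up
 * to an odd index (head |= 1) and then steps backwards two slots at a
 * time: it visits exactly the wait descriptors, marking pending ones
 * QI_ABORT.
 */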

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * Update the HW tail register, indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave the interrupts disabled to prevent interrupt
		 * context from queueing another cmd while one is already
		 * submitted and waiting for completion on this cpu. This
		 * avoids a deadlock where the interrupt context could wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
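
/*
 * The helpers below are the typical callers of qi_submit_sync(): each
 * packs one invalidation request into a struct qi_desc {low, high} pair
 * and submits it synchronously.
 */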

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
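
/*
 * Worked example for the size encoding above: with VTD_PAGE_SHIFT == 12
 * and mask == 2 (an aligned 4-page, 16KB range), the BUG_ON asserts
 * 16KB alignment and the addr |= ... statement sets bits 0-12.  The
 * least-significant zero bit of the resulting address (bit 13 here)
 * tells the hardware the extent of the invalidation, per the VT-d
 * device-IOTLB descriptor format.
 */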

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	/* zeroed so every slot starts out QI_FREE */
	qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
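
/*
 * Sizing note: the descriptor ring is a single zeroed page.  Assuming a
 * 4KB page and 16-byte descriptors (two u64s each), that is room for
 * 256 entries, which QI_LENGTH is expected to match; since every
 * submission consumes a descriptor pair, the ring holds at most 128
 * in-flight requests.
 */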

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the posted write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the posted write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
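
/*
 * Informational: source_id is the PCI requester ID of the faulting
 * device -- bits 15:8 are the bus number, bits 7:0 the devfn.  A
 * source_id of 0x1a08, for example, decodes to 1a:01.0 via
 * PCI_SLOT()/PCI_FUNC().
 */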

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
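
/*
 * Informational: each primary fault record is 16 bytes
 * (PRIMARY_FAULT_REG_LEN) -- the faulting page address in the low
 * qword, the source-id at offset 8, and the fault reason, type and F
 * (fault-pending) bits in the highest 32 bits at offset 12, which is
 * why the handler above reads offset +12 first to test DMA_FRCD_F.
 */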

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
IOMMU_INIT_POST(detect_intel_iommu);