/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI table.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#define PREFIX "DMAR: "

/*
 * No locks are needed, as the DMA remapping hardware unit list is
 * constructed at boot time and hotplug of these units is not
 * supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

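/*
 * Walk one ACPI device scope entry (a start bus number plus a path of
 * device/function pairs) and resolve it to the pci_dev it names.
 * Returns 0 with *dev set (NULL if the device is absent), or -EINVAL
 * if the scope type does not match the kind of device found.
 */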
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
			       PREFIX "Device scope bus [%d] not found\n",
			       scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
			       segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
		       segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
		       "Device scope type does not match for %s\n",
		       pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

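/*
 * Two-pass parse of a device scope list: first count the endpoint and
 * bridge entries, allocate a pci_dev pointer array of that size, then
 * resolve each entry via dmar_parse_one_dev_scope().
 */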
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

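/*
 * Resolve the device scope of an already-registered DRHD.  INCLUDE_ALL
 * units cover every device on their segment and therefore carry no
 * scope list of their own.
 */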
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

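/*
 * RMRR (Reserved Memory Region Reporting) structures describe memory
 * ranges that the BIOS expects to remain identity-mapped for specific
 * devices (typically legacy USB or graphics buffers).  They are only
 * needed when DMA-remapping support is built in.
 */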
#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

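/*
 * ATSR (Root Port ATS Capability Reporting) structures list the PCIe
 * root ports under which Address Translation Services may be enabled.
 * dmar_find_matched_atsr_unit() reports whether a device sits below
 * such a root port (or is covered by an all-ports entry, include_all).
 */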
static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !bridge->is_pcie ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif

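/*
 * RHSA (Remapping Hardware Static Affinity) structures associate a
 * DRHD register base with an ACPI proximity domain, so each IOMMU can
 * be tied to the NUMA node it lives on.
 */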
#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
	     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
	     (unsigned long long)rhsa->base_address,
	     dmi_get_system_info(DMI_BIOS_VENDOR),
	     dmi_get_system_info(DMI_BIOS_VERSION),
	     dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have been made
	 * with the early fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy that SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

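/*
 * Match a PCI device, or any bridge above it, against the device scope
 * of each DRHD.  Specific-scope units are scanned first; the segment's
 * INCLUDE_ALL unit, kept at the tail of the list by
 * dmar_register_drhd_unit(), acts as the fallback.
 */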
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}

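/*
 * Some broken BIOSes report a DRHD at register base address zero.
 * Catch that early and disable DMA-remapping before anything tries to
 * touch the bogus unit; returns 1 if the table looks sane.
 */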
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			drhd = (void *)entry_header;
			if (!drhd->address) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				     dmi_get_system_info(DMI_BIOS_VENDOR),
				     dmi_get_system_info(DMI_BIOS_VERSION),
				     dmi_get_system_info(DMI_PRODUCT_VERSION));
#ifdef CONFIG_DMAR
				dmar_disabled = 1;
#endif
				return 0;
			}
			break;
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;
}

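/*
 * Early detection hook: map and sanity-check the DMAR table, mark the
 * IOMMU as detected and register intel_iommu_init() as the x86 IOMMU
 * init routine.  The table is unmapped again at the end;
 * parse_dmar_table() maps it afresh later.
 */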
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * for now we will disable dma-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled)
			iommu_detected = 1;
#endif
#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}

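/*
 * Map a DRHD's register window, sanity-check the capability registers
 * and cache the values needed before the IOMMU is fully initialized.
 */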
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     drhd->reg_base_addr,
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	iounmap(iommu->reg);
 error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

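/*
 * Check the fault status register for queue errors.  An Invalidation
 * Queue Error (IQE) on our own descriptor is fatal for that request;
 * an Invalidation Time-out Error (ITE) aborts every pending wait
 * descriptor, and the caller retries the submission on -EAGAIN.
 */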
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
				sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
				sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

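/*
 * Each submission really queues two descriptors: the caller's request
 * followed by a wait descriptor that asks the hardware to write QI_DONE
 * into the matching status slot, which the submitter polls on.
 */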
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

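/*
 * Helpers that build the individual invalidation descriptors (interrupt
 * entry cache, context cache, IOTLB and device-IOTLB) and submit them
 * synchronously through the invalidation queue.
 */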
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the HW a chance to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * Queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

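/*
 * MSI chip callbacks for the fault-reporting interrupt: mask/unmask via
 * the Fault Event Control register and read/write the Fault Event
 * Data/Address registers.
 */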
void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

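/*
 * Fault interrupt handler: walk the primary fault recording registers,
 * log each pending fault via dmar_fault_do_one(), clear its F bit, and
 * finally write back the fault status register to clear any remaining
 * indicators.
 */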
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_set_interrupt(iommu);
		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in the DMAR table description.
 */
int dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	return dmar->flags & 0x1;
}