intel-iommu: use physfn to search drhd for VF
drivers/pci/dmar.c (deliverable/linux.git)
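The change named in the title shows up below in dmar_find_matched_drhd_unit() and dmar_find_matched_atsr_unit(): both call pci_physfn() before matching against the DMAR device scope, so an SR-IOV virtual function is looked up through its parent physical function, since the ACPI device scope typically lists only the PF. As a rough sketch of the helper this relies on (roughly what pci_physfn() in <linux/pci.h> does; shown here for context, not part of dmar.c):

static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	/* a virtual function is matched via the PF that owns it */
	if (dev->is_virtfn)
		dev = dev->physfn;
#endif
	return dev;
}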
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
98bcef56 17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
10e5247f 21 *
e61d98d8 22 * This file implements early detection/parsing of Remapping Devices
23 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
24 * tables.
25 *
26 * These routines are used by both DMA-remapping and Interrupt-remapping
27 */
28
29#include <linux/pci.h>
30#include <linux/dmar.h>
31#include <linux/iova.h>
32#include <linux/intel-iommu.h>
fe962e90 33#include <linux/timer.h>
34#include <linux/irq.h>
35#include <linux/interrupt.h>
69575d38 36#include <linux/tboot.h>
eb27cae8 37#include <linux/dmi.h>
10e5247f 38
a192a958 39#define PREFIX "DMAR: "
40
41/* No locks are needed as DMA remapping hardware unit
42 * list is constructed at boot time and hotplug of
43 * these units are not supported by the architecture.
44 */
45LIST_HEAD(dmar_drhd_units);
46
47static struct acpi_table_header * __initdata dmar_tbl;
8e1568f3 48static acpi_size dmar_tbl_size;
49
50static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
51{
52 /*
53 * add INCLUDE_ALL at the tail, so scan the list will find it at
54 * the very end.
55 */
56 if (drhd->include_all)
57 list_add_tail(&drhd->list, &dmar_drhd_units);
58 else
59 list_add(&drhd->list, &dmar_drhd_units);
60}
61
62static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
63 struct pci_dev **dev, u16 segment)
64{
65 struct pci_bus *bus;
66 struct pci_dev *pdev = NULL;
67 struct acpi_dmar_pci_path *path;
68 int count;
69
70 bus = pci_find_bus(segment, scope->bus);
71 path = (struct acpi_dmar_pci_path *)(scope + 1);
72 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
73 / sizeof(struct acpi_dmar_pci_path);
74
75 while (count) {
76 if (pdev)
77 pci_dev_put(pdev);
78 /*
79 * Some BIOSes list non-exist devices in DMAR table, just
80 * ignore it
81 */
82 if (!bus) {
83 printk(KERN_WARNING
84 PREFIX "Device scope bus [%d] not found\n",
85 scope->bus);
86 break;
87 }
88 pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
89 if (!pdev) {
90 printk(KERN_WARNING PREFIX
91 "Device scope device [%04x:%02x:%02x.%02x] not found\n",
92 segment, bus->number, path->dev, path->fn);
93 break;
94 }
95 path ++;
96 count --;
97 bus = pdev->subordinate;
98 }
99 if (!pdev) {
100 printk(KERN_WARNING PREFIX
101 "Device scope device [%04x:%02x:%02x.%02x] not found\n",
102 segment, scope->bus, path->dev, path->fn);
103 *dev = NULL;
104 return 0;
105 }
106 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
107 pdev->subordinate) || (scope->entry_type == \
108 ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
109 pci_dev_put(pdev);
110 printk(KERN_WARNING PREFIX
111 "Device scope type does not match for %s\n",
112 pci_name(pdev));
113 return -EINVAL;
114 }
115 *dev = pdev;
116 return 0;
117}
118
119static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
120 struct pci_dev ***devices, u16 segment)
121{
122 struct acpi_dmar_device_scope *scope;
123 void * tmp = start;
124 int index;
125 int ret;
126
127 *cnt = 0;
128 while (start < end) {
129 scope = start;
130 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
131 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
132 (*cnt)++;
5715f0f9 133 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
10e5247f 134 printk(KERN_WARNING PREFIX
135 "Unsupported device scope\n");
136 }
137 start += scope->length;
138 }
139 if (*cnt == 0)
140 return 0;
141
142 *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
143 if (!*devices)
144 return -ENOMEM;
145
146 start = tmp;
147 index = 0;
148 while (start < end) {
149 scope = start;
150 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
151 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
152 ret = dmar_parse_one_dev_scope(scope,
153 &(*devices)[index], segment);
154 if (ret) {
155 kfree(*devices);
156 return ret;
157 }
158 index ++;
159 }
160 start += scope->length;
161 }
162
163 return 0;
164}
165
166/**
167 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
168 * structure which uniquely represent one DMA remapping hardware unit
169 * present in the platform
170 */
171static int __init
172dmar_parse_one_drhd(struct acpi_dmar_header *header)
173{
174 struct acpi_dmar_hardware_unit *drhd;
175 struct dmar_drhd_unit *dmaru;
176 int ret = 0;
10e5247f 177
e523b38e 178 drhd = (struct acpi_dmar_hardware_unit *)header;
179 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
180 if (!dmaru)
181 return -ENOMEM;
182
1886e8a9 183 dmaru->hdr = header;
10e5247f 184 dmaru->reg_base_addr = drhd->address;
276dbf99 185 dmaru->segment = drhd->segment;
186 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
187
188 ret = alloc_iommu(dmaru);
189 if (ret) {
190 kfree(dmaru);
191 return ret;
192 }
193 dmar_register_drhd_unit(dmaru);
194 return 0;
195}
196
f82851a8 197static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
198{
199 struct acpi_dmar_hardware_unit *drhd;
f82851a8 200 int ret = 0;
201
202 drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
203
204 if (dmaru->include_all)
205 return 0;
206
207 ret = dmar_parse_dev_scope((void *)(drhd + 1),
1886e8a9 208 ((void *)drhd) + drhd->header.length,
209 &dmaru->devices_cnt, &dmaru->devices,
210 drhd->segment);
1c7d1bca 211 if (ret) {
1886e8a9 212 list_del(&dmaru->list);
10e5247f 213 kfree(dmaru);
1886e8a9 214 }
215 return ret;
216}
217
218#ifdef CONFIG_DMAR
219LIST_HEAD(dmar_rmrr_units);
220
221static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
222{
223 list_add(&rmrr->list, &dmar_rmrr_units);
224}
225
226
227static int __init
228dmar_parse_one_rmrr(struct acpi_dmar_header *header)
229{
230 struct acpi_dmar_reserved_memory *rmrr;
231 struct dmar_rmrr_unit *rmrru;
232
233 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
234 if (!rmrru)
235 return -ENOMEM;
236
1886e8a9 237 rmrru->hdr = header;
238 rmrr = (struct acpi_dmar_reserved_memory *)header;
239 rmrru->base_address = rmrr->base_address;
240 rmrru->end_address = rmrr->end_address;
241
242 dmar_register_rmrr_unit(rmrru);
243 return 0;
244}
245
246static int __init
247rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
248{
249 struct acpi_dmar_reserved_memory *rmrr;
250 int ret;
251
252 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
10e5247f 253 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
1886e8a9 254 ((void *)rmrr) + rmrr->header.length,
255 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
256
257 if (ret || (rmrru->devices_cnt == 0)) {
258 list_del(&rmrru->list);
10e5247f 259 kfree(rmrru);
1886e8a9 260 }
261 return ret;
262}
263
264static LIST_HEAD(dmar_atsr_units);
265
266static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
267{
268 struct acpi_dmar_atsr *atsr;
269 struct dmar_atsr_unit *atsru;
270
271 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
272 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
273 if (!atsru)
274 return -ENOMEM;
275
276 atsru->hdr = hdr;
277 atsru->include_all = atsr->flags & 0x1;
278
279 list_add(&atsru->list, &dmar_atsr_units);
280
281 return 0;
282}
283
284static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
285{
286 int rc;
287 struct acpi_dmar_atsr *atsr;
288
289 if (atsru->include_all)
290 return 0;
291
292 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
293 rc = dmar_parse_dev_scope((void *)(atsr + 1),
294 (void *)atsr + atsr->header.length,
295 &atsru->devices_cnt, &atsru->devices,
296 atsr->segment);
297 if (rc || !atsru->devices_cnt) {
298 list_del(&atsru->list);
299 kfree(atsru);
300 }
301
302 return rc;
303}
304
305int dmar_find_matched_atsr_unit(struct pci_dev *dev)
306{
307 int i;
308 struct pci_bus *bus;
309 struct acpi_dmar_atsr *atsr;
310 struct dmar_atsr_unit *atsru;
311
312 dev = pci_physfn(dev);
313
314 list_for_each_entry(atsru, &dmar_atsr_units, list) {
315 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
316 if (atsr->segment == pci_domain_nr(dev->bus))
317 goto found;
318 }
319
320 return 0;
321
322found:
323 for (bus = dev->bus; bus; bus = bus->parent) {
324 struct pci_dev *bridge = bus->self;
325
5f4d91a1 326 if (!bridge || !pci_is_pcie(bridge) ||
327 bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
328 return 0;
329
330 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
331 for (i = 0; i < atsru->devices_cnt; i++)
332 if (atsru->devices[i] == bridge)
333 return 1;
334 break;
335 }
336 }
337
338 if (atsru->include_all)
339 return 1;
340
341 return 0;
342}
aaa9d1dd 343#endif
10e5247f 344
aa697079 345#ifdef CONFIG_ACPI_NUMA
346static int __init
347dmar_parse_one_rhsa(struct acpi_dmar_header *header)
348{
349 struct acpi_dmar_rhsa *rhsa;
350 struct dmar_drhd_unit *drhd;
351
352 rhsa = (struct acpi_dmar_rhsa *)header;
aa697079 353 for_each_drhd_unit(drhd) {
354 if (drhd->reg_base_addr == rhsa->base_address) {
355 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
356
357 if (!node_online(node))
358 node = -1;
359 drhd->iommu->node = node;
360 return 0;
361 }
ee34b32d 362 }
363 WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
364 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
365 drhd->reg_base_addr,
366 dmi_get_system_info(DMI_BIOS_VENDOR),
367 dmi_get_system_info(DMI_BIOS_VERSION),
368 dmi_get_system_info(DMI_PRODUCT_VERSION));
ee34b32d 369
aa697079 370 return 0;
ee34b32d 371}
aa697079 372#endif
ee34b32d 373
374static void __init
375dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
376{
377 struct acpi_dmar_hardware_unit *drhd;
378 struct acpi_dmar_reserved_memory *rmrr;
aa5d2b51 379 struct acpi_dmar_atsr *atsr;
17b60977 380 struct acpi_dmar_rhsa *rhsa;
381
382 switch (header->type) {
383 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
384 drhd = container_of(header, struct acpi_dmar_hardware_unit,
385 header);
10e5247f 386 printk (KERN_INFO PREFIX
387 "DRHD base: %#016Lx flags: %#x\n",
388 (unsigned long long)drhd->address, drhd->flags);
389 break;
390 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
391 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
392 header);
10e5247f 393 printk (KERN_INFO PREFIX
aa5d2b51 394 "RMRR base: %#016Lx end: %#016Lx\n",
395 (unsigned long long)rmrr->base_address,
396 (unsigned long long)rmrr->end_address);
10e5247f 397 break;
398 case ACPI_DMAR_TYPE_ATSR:
399 atsr = container_of(header, struct acpi_dmar_atsr, header);
400 printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
401 break;
402 case ACPI_DMAR_HARDWARE_AFFINITY:
403 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
404 printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
405 (unsigned long long)rhsa->base_address,
406 rhsa->proximity_domain);
407 break;
408 }
409}
410
411/**
412 * dmar_table_detect - checks to see if the platform supports DMAR devices
413 */
414static int __init dmar_table_detect(void)
415{
416 acpi_status status = AE_OK;
417
418 /* if we could find DMAR table, then there are DMAR devices */
419 status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
420 (struct acpi_table_header **)&dmar_tbl,
421 &dmar_tbl_size);
422
423 if (ACPI_SUCCESS(status) && !dmar_tbl) {
424 printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
425 status = AE_NOT_FOUND;
426 }
427
428 return (ACPI_SUCCESS(status) ? 1 : 0);
429}
aaa9d1dd 430
431/**
432 * parse_dmar_table - parses the DMA reporting table
433 */
434static int __init
435parse_dmar_table(void)
436{
437 struct acpi_table_dmar *dmar;
438 struct acpi_dmar_header *entry_header;
439 int ret = 0;
440
441 /*
442 * Do it again, earlier dmar_tbl mapping could be mapped with
443 * fixed map.
444 */
445 dmar_table_detect();
446
447 /*
448 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
449 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
450 */
451 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
452
453 dmar = (struct acpi_table_dmar *)dmar_tbl;
454 if (!dmar)
455 return -ENODEV;
456
5b6985ce 457 if (dmar->width < PAGE_SHIFT - 1) {
093f87d2 458 printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
459 return -EINVAL;
460 }
461
462 printk (KERN_INFO PREFIX "Host address width %d\n",
463 dmar->width + 1);
464
465 entry_header = (struct acpi_dmar_header *)(dmar + 1);
466 while (((unsigned long)entry_header) <
467 (((unsigned long)dmar) + dmar_tbl->length)) {
468 /* Avoid looping forever on bad ACPI tables */
469 if (entry_header->length == 0) {
470 printk(KERN_WARNING PREFIX
471 "Invalid 0-length structure\n");
472 ret = -EINVAL;
473 break;
474 }
475
476 dmar_table_print_dmar_entry(entry_header);
477
478 switch (entry_header->type) {
479 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
480 ret = dmar_parse_one_drhd(entry_header);
481 break;
482 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
aaa9d1dd 483#ifdef CONFIG_DMAR
10e5247f 484 ret = dmar_parse_one_rmrr(entry_header);
485#endif
486 break;
487 case ACPI_DMAR_TYPE_ATSR:
488#ifdef CONFIG_DMAR
489 ret = dmar_parse_one_atsr(entry_header);
aaa9d1dd 490#endif
10e5247f 491 break;
17b60977 492 case ACPI_DMAR_HARDWARE_AFFINITY:
aa697079 493#ifdef CONFIG_ACPI_NUMA
ee34b32d 494 ret = dmar_parse_one_rhsa(entry_header);
aa697079 495#endif
17b60977 496 break;
497 default:
498 printk(KERN_WARNING PREFIX
499 "Unknown DMAR structure type %d\n",
500 entry_header->type);
501 ret = 0; /* for forward compatibility */
502 break;
503 }
504 if (ret)
505 break;
506
507 entry_header = ((void *)entry_header + entry_header->length);
508 }
509 return ret;
510}
511
dda56549 512static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
513 struct pci_dev *dev)
514{
515 int index;
516
517 while (dev) {
518 for (index = 0; index < cnt; index++)
519 if (dev == devices[index])
520 return 1;
521
522 /* Check our parent */
523 dev = dev->bus->self;
524 }
525
526 return 0;
527}
528
529struct dmar_drhd_unit *
530dmar_find_matched_drhd_unit(struct pci_dev *dev)
531{
532 struct dmar_drhd_unit *dmaru = NULL;
533 struct acpi_dmar_hardware_unit *drhd;
534
535 dev = pci_physfn(dev);
536
537 list_for_each_entry(dmaru, &dmar_drhd_units, list) {
538 drhd = container_of(dmaru->hdr,
539 struct acpi_dmar_hardware_unit,
540 header);
541
542 if (dmaru->include_all &&
543 drhd->segment == pci_domain_nr(dev->bus))
544 return dmaru;
e61d98d8 545
546 if (dmar_pci_device_match(dmaru->devices,
547 dmaru->devices_cnt, dev))
548 return dmaru;
549 }
550
551 return NULL;
552}
553
554int __init dmar_dev_scope_init(void)
555{
04e2ea67 556 struct dmar_drhd_unit *drhd, *drhd_n;
557 int ret = -ENODEV;
558
04e2ea67 559 list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
560 ret = dmar_parse_dev(drhd);
561 if (ret)
562 return ret;
563 }
564
565#ifdef CONFIG_DMAR
566 {
04e2ea67 567 struct dmar_rmrr_unit *rmrr, *rmrr_n;
568 struct dmar_atsr_unit *atsr, *atsr_n;
569
04e2ea67 570 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
571 ret = rmrr_parse_dev(rmrr);
572 if (ret)
573 return ret;
574 }
575
576 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
577 ret = atsr_parse_dev(atsr);
578 if (ret)
579 return ret;
580 }
1886e8a9 581 }
aaa9d1dd 582#endif
583
584 return ret;
585}
586
587
588int __init dmar_table_init(void)
589{
1886e8a9 590 static int dmar_table_initialized;
591 int ret;
592
593 if (dmar_table_initialized)
594 return 0;
595
596 dmar_table_initialized = 1;
597
598 ret = parse_dmar_table();
599 if (ret) {
600 if (ret != -ENODEV)
601 printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
602 return ret;
603 }
604
605 if (list_empty(&dmar_drhd_units)) {
606 printk(KERN_INFO PREFIX "No DMAR devices found\n");
607 return -ENODEV;
608 }
093f87d2 609
aaa9d1dd 610#ifdef CONFIG_DMAR
2d6b5f85 611 if (list_empty(&dmar_rmrr_units))
093f87d2 612 printk(KERN_INFO PREFIX "No RMRR found\n");
613
614 if (list_empty(&dmar_atsr_units))
615 printk(KERN_INFO PREFIX "No ATSR found\n");
aaa9d1dd 616#endif
093f87d2 617
618 return 0;
619}
620
621static int bios_warned;
622
623int __init check_zero_address(void)
624{
625 struct acpi_table_dmar *dmar;
626 struct acpi_dmar_header *entry_header;
627 struct acpi_dmar_hardware_unit *drhd;
628
629 dmar = (struct acpi_table_dmar *)dmar_tbl;
630 entry_header = (struct acpi_dmar_header *)(dmar + 1);
631
632 while (((unsigned long)entry_header) <
633 (((unsigned long)dmar) + dmar_tbl->length)) {
634 /* Avoid looping forever on bad ACPI tables */
635 if (entry_header->length == 0) {
636 printk(KERN_WARNING PREFIX
637 "Invalid 0-length structure\n");
638 return 0;
639 }
640
641 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
642 void __iomem *addr;
643 u64 cap, ecap;
644
645 drhd = (void *)entry_header;
646 if (!drhd->address) {
647 /* Promote an attitude of violence to a BIOS engineer today */
648 WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
649 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
650 dmi_get_system_info(DMI_BIOS_VENDOR),
651 dmi_get_system_info(DMI_BIOS_VERSION),
652 dmi_get_system_info(DMI_PRODUCT_VERSION));
6ecbf01c 653 bios_warned = 1;
654 goto failed;
655 }
656
657 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
658 if (!addr ) {
659 printk("IOMMU: can't validate: %llx\n", drhd->address);
660 goto failed;
661 }
662 cap = dmar_readq(addr + DMAR_CAP_REG);
663 ecap = dmar_readq(addr + DMAR_ECAP_REG);
664 early_iounmap(addr, VTD_PAGE_SIZE);
665 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
666 /* Promote an attitude of violence to a BIOS engineer today */
667 WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
668 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
669 drhd->address,
670 dmi_get_system_info(DMI_BIOS_VENDOR),
671 dmi_get_system_info(DMI_BIOS_VERSION),
672 dmi_get_system_info(DMI_PRODUCT_VERSION));
6ecbf01c 673 bios_warned = 1;
2c992208 674 goto failed;
86cf898e 675 }
676 }
677
678 entry_header = ((void *)entry_header + entry_header->length);
679 }
680 return 1;
681
682failed:
683#ifdef CONFIG_DMAR
684 dmar_disabled = 1;
685#endif
686 return 0;
687}
688
689void __init detect_intel_iommu(void)
690{
691 int ret;
692
f6dd5c31 693 ret = dmar_table_detect();
694 if (ret)
695 ret = check_zero_address();
2ae21010 696 {
cacd4213 697#ifdef CONFIG_INTR_REMAP
698 struct acpi_table_dmar *dmar;
699 /*
700 * for now we will disable dma-remapping when interrupt
701 * remapping is enabled.
702 * When support for queued invalidation for IOTLB invalidation
703 * is added, we will not need this any more.
704 */
705 dmar = (struct acpi_table_dmar *) dmar_tbl;
cacd4213 706 if (ret && cpu_has_x2apic && dmar->flags & 0x1)
707 printk(KERN_INFO
708 "Queued invalidation will be enabled to support "
709 "x2apic and Intr-remapping.\n");
cacd4213 710#endif
cacd4213 711#ifdef CONFIG_DMAR
11bd04f6 712 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
2ae21010 713 iommu_detected = 1;
714 /* Make sure ACS will be enabled */
715 pci_request_acs();
716 }
717#endif
718#ifdef CONFIG_X86
719 if (ret)
720 x86_init.iommu.iommu_init = intel_iommu_init;
2ae21010 721#endif
cacd4213 722 }
8e1568f3 723 early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
f6dd5c31 724 dmar_tbl = NULL;
725}
726
727
1886e8a9 728int alloc_iommu(struct dmar_drhd_unit *drhd)
e61d98d8 729{
c42d9f32 730 struct intel_iommu *iommu;
e61d98d8
SS
731 int map_size;
732 u32 ver;
c42d9f32 733 static int iommu_allocated = 0;
43f7392b 734 int agaw = 0;
4ed0d3e6 735 int msagaw = 0;
c42d9f32 736
737 if (!drhd->reg_base_addr) {
738 if (!bios_warned) {
739 WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
740 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
741 dmi_get_system_info(DMI_BIOS_VENDOR),
742 dmi_get_system_info(DMI_BIOS_VERSION),
743 dmi_get_system_info(DMI_PRODUCT_VERSION));
744 bios_warned = 1;
745 }
746 return -EINVAL;
747 }
748
749 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
750 if (!iommu)
1886e8a9 751 return -ENOMEM;
752
753 iommu->seq_id = iommu_allocated++;
9d783ba0 754 sprintf (iommu->name, "dmar%d", iommu->seq_id);
e61d98d8 755
5b6985ce 756 iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
757 if (!iommu->reg) {
758 printk(KERN_ERR "IOMMU: can't map the region\n");
759 goto error;
760 }
761 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
762 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
763
0815565a 764 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
765 if (!bios_warned) {
766 /* Promote an attitude of violence to a BIOS engineer today */
767 WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
768 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
769 drhd->reg_base_addr,
770 dmi_get_system_info(DMI_BIOS_VENDOR),
771 dmi_get_system_info(DMI_BIOS_VERSION),
772 dmi_get_system_info(DMI_PRODUCT_VERSION));
773 bios_warned = 1;
774 }
775 goto err_unmap;
776 }
777
43f7392b 778#ifdef CONFIG_DMAR
779 agaw = iommu_calculate_agaw(iommu);
780 if (agaw < 0) {
781 printk(KERN_ERR
782 "Cannot get a valid agaw for iommu (seq_id = %d)\n",
783 iommu->seq_id);
0815565a 784 goto err_unmap;
785 }
786 msagaw = iommu_calculate_max_sagaw(iommu);
787 if (msagaw < 0) {
788 printk(KERN_ERR
789 "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1b573683 790 iommu->seq_id);
0815565a 791 goto err_unmap;
1b573683 792 }
43f7392b 793#endif
1b573683 794 iommu->agaw = agaw;
4ed0d3e6 795 iommu->msagaw = msagaw;
1b573683 796
797 iommu->node = -1;
798
799 /* the registers might be more than one page */
800 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
801 cap_max_fault_reg_offset(iommu->cap));
802 map_size = VTD_PAGE_ALIGN(map_size);
803 if (map_size > VTD_PAGE_SIZE) {
804 iounmap(iommu->reg);
805 iommu->reg = ioremap(drhd->reg_base_addr, map_size);
806 if (!iommu->reg) {
807 printk(KERN_ERR "IOMMU: can't map the region\n");
808 goto error;
809 }
810 }
811
812 ver = readl(iommu->reg + DMAR_VER_REG);
813 pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
814 iommu->seq_id,
815 (unsigned long long)drhd->reg_base_addr,
816 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
817 (unsigned long long)iommu->cap,
818 (unsigned long long)iommu->ecap);
819
820 spin_lock_init(&iommu->register_lock);
821
822 drhd->iommu = iommu;
1886e8a9 823 return 0;
824
825 err_unmap:
826 iounmap(iommu->reg);
827 error:
e61d98d8 828 kfree(iommu);
1886e8a9 829 return -1;
830}
831
832void free_iommu(struct intel_iommu *iommu)
833{
834 if (!iommu)
835 return;
836
837#ifdef CONFIG_DMAR
838 free_dmar_iommu(iommu);
839#endif
840
841 if (iommu->reg)
842 iounmap(iommu->reg);
843 kfree(iommu);
844}
845
846/*
847 * Reclaim all the submitted descriptors which have completed its work.
848 */
849static inline void reclaim_free_desc(struct q_inval *qi)
850{
851 while (qi->desc_status[qi->free_tail] == QI_DONE ||
852 qi->desc_status[qi->free_tail] == QI_ABORT) {
853 qi->desc_status[qi->free_tail] = QI_FREE;
854 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
855 qi->free_cnt++;
856 }
857}
858
859static int qi_check_fault(struct intel_iommu *iommu, int index)
860{
861 u32 fault;
6ba6c3a4 862 int head, tail;
863 struct q_inval *qi = iommu->qi;
864 int wait_index = (index + 1) % QI_LENGTH;
865
866 if (qi->desc_status[wait_index] == QI_ABORT)
867 return -EAGAIN;
868
869 fault = readl(iommu->reg + DMAR_FSTS_REG);
870
871 /*
872 * If IQE happens, the head points to the descriptor associated
873 * with the error. No new descriptors are fetched until the IQE
874 * is cleared.
875 */
876 if (fault & DMA_FSTS_IQE) {
877 head = readl(iommu->reg + DMAR_IQH_REG);
878 if ((head >> DMAR_IQ_SHIFT) == index) {
879 printk(KERN_ERR "VT-d detected invalid descriptor: "
880 "low=%llx, high=%llx\n",
881 (unsigned long long)qi->desc[index].low,
882 (unsigned long long)qi->desc[index].high);
883 memcpy(&qi->desc[index], &qi->desc[wait_index],
884 sizeof(struct qi_desc));
885 __iommu_flush_cache(iommu, &qi->desc[index],
886 sizeof(struct qi_desc));
887 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
888 return -EINVAL;
889 }
890 }
891
892 /*
893 * If ITE happens, all pending wait_desc commands are aborted.
894 * No new descriptors are fetched until the ITE is cleared.
895 */
896 if (fault & DMA_FSTS_ITE) {
897 head = readl(iommu->reg + DMAR_IQH_REG);
898 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
899 head |= 1;
900 tail = readl(iommu->reg + DMAR_IQT_REG);
901 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
902
903 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
904
905 do {
906 if (qi->desc_status[head] == QI_IN_USE)
907 qi->desc_status[head] = QI_ABORT;
908 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
909 } while (head != tail);
910
911 if (qi->desc_status[wait_index] == QI_ABORT)
912 return -EAGAIN;
913 }
914
915 if (fault & DMA_FSTS_ICE)
916 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
917
918 return 0;
919}
920
921/*
922 * Submit the queued invalidation descriptor to the remapping
923 * hardware unit and wait for its completion.
924 */
704126ad 925int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
fe962e90 926{
6ba6c3a4 927 int rc;
928 struct q_inval *qi = iommu->qi;
929 struct qi_desc *hw, wait_desc;
930 int wait_index, index;
931 unsigned long flags;
932
933 if (!qi)
704126ad 934 return 0;
935
936 hw = qi->desc;
937
938restart:
939 rc = 0;
940
f05810c9 941 spin_lock_irqsave(&qi->q_lock, flags);
fe962e90 942 while (qi->free_cnt < 3) {
f05810c9 943 spin_unlock_irqrestore(&qi->q_lock, flags);
fe962e90 944 cpu_relax();
f05810c9 945 spin_lock_irqsave(&qi->q_lock, flags);
946 }
947
948 index = qi->free_head;
949 wait_index = (index + 1) % QI_LENGTH;
950
951 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
952
953 hw[index] = *desc;
954
955 wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
956 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
957 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
958
959 hw[wait_index] = wait_desc;
960
961 __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
962 __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
963
964 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
965 qi->free_cnt -= 2;
966
967 /*
968 * update the HW tail register indicating the presence of
969 * new descriptors.
970 */
6ba6c3a4 971 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
972
973 while (qi->desc_status[wait_index] != QI_DONE) {
974 /*
975 * We will leave the interrupts disabled, to prevent interrupt
976 * context to queue another cmd while a cmd is already submitted
977 * and waiting for completion on this cpu. This is to avoid
978 * a deadlock where the interrupt context can wait indefinitely
979 * for free slots in the queue.
980 */
981 rc = qi_check_fault(iommu, index);
982 if (rc)
6ba6c3a4 983 break;
704126ad 984
985 spin_unlock(&qi->q_lock);
986 cpu_relax();
987 spin_lock(&qi->q_lock);
988 }
989
990 qi->desc_status[index] = QI_DONE;
991
992 reclaim_free_desc(qi);
f05810c9 993 spin_unlock_irqrestore(&qi->q_lock, flags);
704126ad 994
995 if (rc == -EAGAIN)
996 goto restart;
997
704126ad 998 return rc;
999}
1000
1001/*
1002 * Flush the global interrupt entry cache.
1003 */
1004void qi_global_iec(struct intel_iommu *iommu)
1005{
1006 struct qi_desc desc;
1007
1008 desc.low = QI_IEC_TYPE;
1009 desc.high = 0;
1010
704126ad 1011 /* should never fail */
1012 qi_submit_sync(&desc, iommu);
1013}
1014
1015void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1016 u64 type)
3481f210 1017{
1018 struct qi_desc desc;
1019
1020 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1021 | QI_CC_GRAN(type) | QI_CC_TYPE;
1022 desc.high = 0;
1023
4c25a2c1 1024 qi_submit_sync(&desc, iommu);
1025}
1026
1027void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1028 unsigned int size_order, u64 type)
1029{
1030 u8 dw = 0, dr = 0;
1031
1032 struct qi_desc desc;
1033 int ih = 0;
1034
1035 if (cap_write_drain(iommu->cap))
1036 dw = 1;
1037
1038 if (cap_read_drain(iommu->cap))
1039 dr = 1;
1040
1041 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1042 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1043 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1044 | QI_IOTLB_AM(size_order);
1045
1f0ef2aa 1046 qi_submit_sync(&desc, iommu);
1047}
1048
1049void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
1050 u64 addr, unsigned mask)
1051{
1052 struct qi_desc desc;
1053
1054 if (mask) {
1055 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
1056 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1057 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1058 } else
1059 desc.high = QI_DEV_IOTLB_ADDR(addr);
1060
1061 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1062 qdep = 0;
1063
1064 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1065 QI_DIOTLB_TYPE;
1066
1067 qi_submit_sync(&desc, iommu);
1068}
1069
1070/*
1071 * Disable Queued Invalidation interface.
1072 */
1073void dmar_disable_qi(struct intel_iommu *iommu)
1074{
1075 unsigned long flags;
1076 u32 sts;
1077 cycles_t start_time = get_cycles();
1078
1079 if (!ecap_qis(iommu->ecap))
1080 return;
1081
1082 spin_lock_irqsave(&iommu->register_lock, flags);
1083
1084 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
1085 if (!(sts & DMA_GSTS_QIES))
1086 goto end;
1087
1088 /*
1089 * Give a chance to HW to complete the pending invalidation requests.
1090 */
1091 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1092 readl(iommu->reg + DMAR_IQH_REG)) &&
1093 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1094 cpu_relax();
1095
1096 iommu->gcmd &= ~DMA_GCMD_QIE;
1097 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1098
1099 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1100 !(sts & DMA_GSTS_QIES), sts);
1101end:
1102 spin_unlock_irqrestore(&iommu->register_lock, flags);
1103}
1104
1105/*
1106 * Enable queued invalidation.
1107 */
1108static void __dmar_enable_qi(struct intel_iommu *iommu)
1109{
c416daa9 1110 u32 sts;
1111 unsigned long flags;
1112 struct q_inval *qi = iommu->qi;
1113
1114 qi->free_head = qi->free_tail = 0;
1115 qi->free_cnt = QI_LENGTH;
1116
1117 spin_lock_irqsave(&iommu->register_lock, flags);
1118
1119 /* write zero to the tail reg */
1120 writel(0, iommu->reg + DMAR_IQT_REG);
1121
1122 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1123
eb4a52bc 1124 iommu->gcmd |= DMA_GCMD_QIE;
c416daa9 1125 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1126
1127 /* Make sure hardware complete it */
1128 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1129
1130 spin_unlock_irqrestore(&iommu->register_lock, flags);
1131}
1132
1133/*
1134 * Enable Queued Invalidation interface. This is a must to support
1135 * interrupt-remapping. Also used by DMA-remapping, which replaces
1136 * register based IOTLB invalidation.
1137 */
1138int dmar_enable_qi(struct intel_iommu *iommu)
1139{
fe962e90 1140 struct q_inval *qi;
751cafe3 1141 struct page *desc_page;
1142
1143 if (!ecap_qis(iommu->ecap))
1144 return -ENOENT;
1145
1146 /*
1147 * queued invalidation is already setup and enabled.
1148 */
1149 if (iommu->qi)
1150 return 0;
1151
fa4b57cc 1152 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1153 if (!iommu->qi)
1154 return -ENOMEM;
1155
1156 qi = iommu->qi;
1157
1158
1159 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1160 if (!desc_page) {
1161 kfree(qi);
1162 iommu->qi = 0;
1163 return -ENOMEM;
1164 }
1165
1166 qi->desc = page_address(desc_page);
1167
fa4b57cc 1168 qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
1169 if (!qi->desc_status) {
1170 free_page((unsigned long) qi->desc);
1171 kfree(qi);
1172 iommu->qi = 0;
1173 return -ENOMEM;
1174 }
1175
1176 qi->free_head = qi->free_tail = 0;
1177 qi->free_cnt = QI_LENGTH;
1178
1179 spin_lock_init(&qi->q_lock);
1180
eb4a52bc 1181 __dmar_enable_qi(iommu);
1182
1183 return 0;
1184}
1185
1186/* iommu interrupt handling. Most stuff are MSI-like. */
1187
1188enum faulttype {
1189 DMA_REMAP,
1190 INTR_REMAP,
1191 UNKNOWN,
1192};
1193
1194static const char *dma_remap_fault_reasons[] =
1195{
1196 "Software",
1197 "Present bit in root entry is clear",
1198 "Present bit in context entry is clear",
1199 "Invalid context entry",
1200 "Access beyond MGAW",
1201 "PTE Write access is not set",
1202 "PTE Read access is not set",
1203 "Next page table ptr is invalid",
1204 "Root table address invalid",
1205 "Context table ptr is invalid",
1206 "non-zero reserved fields in RTP",
1207 "non-zero reserved fields in CTP",
1208 "non-zero reserved fields in PTE",
1209};
1210
1211static const char *intr_remap_fault_reasons[] =
1212{
1213 "Detected reserved fields in the decoded interrupt-remapped request",
1214 "Interrupt index exceeded the interrupt-remapping table size",
1215 "Present field in the IRTE entry is clear",
1216 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1217 "Detected reserved fields in the IRTE entry",
1218 "Blocked a compatibility format interrupt request",
1219 "Blocked an interrupt request due to source-id verification failure",
1220};
1221
1222#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
1223
9d783ba0 1224const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
0ac2491f 1225{
1226 if (fault_reason >= 0x20 && (fault_reason <= 0x20 +
1227 ARRAY_SIZE(intr_remap_fault_reasons))) {
1228 *fault_type = INTR_REMAP;
1229 return intr_remap_fault_reasons[fault_reason - 0x20];
1230 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1231 *fault_type = DMA_REMAP;
1232 return dma_remap_fault_reasons[fault_reason];
1233 } else {
1234 *fault_type = UNKNOWN;
0ac2491f 1235 return "Unknown";
9d783ba0 1236 }
1237}
1238
1239void dmar_msi_unmask(unsigned int irq)
1240{
1241 struct intel_iommu *iommu = get_irq_data(irq);
1242 unsigned long flag;
1243
1244 /* unmask it */
1245 spin_lock_irqsave(&iommu->register_lock, flag);
1246 writel(0, iommu->reg + DMAR_FECTL_REG);
1247 /* Read a reg to force flush the post write */
1248 readl(iommu->reg + DMAR_FECTL_REG);
1249 spin_unlock_irqrestore(&iommu->register_lock, flag);
1250}
1251
1252void dmar_msi_mask(unsigned int irq)
1253{
1254 unsigned long flag;
1255 struct intel_iommu *iommu = get_irq_data(irq);
1256
1257 /* mask it */
1258 spin_lock_irqsave(&iommu->register_lock, flag);
1259 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1260 /* Read a reg to force flush the post write */
1261 readl(iommu->reg + DMAR_FECTL_REG);
1262 spin_unlock_irqrestore(&iommu->register_lock, flag);
1263}
1264
1265void dmar_msi_write(int irq, struct msi_msg *msg)
1266{
1267 struct intel_iommu *iommu = get_irq_data(irq);
1268 unsigned long flag;
1269
1270 spin_lock_irqsave(&iommu->register_lock, flag);
1271 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1272 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1273 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1274 spin_unlock_irqrestore(&iommu->register_lock, flag);
1275}
1276
1277void dmar_msi_read(int irq, struct msi_msg *msg)
1278{
1279 struct intel_iommu *iommu = get_irq_data(irq);
1280 unsigned long flag;
1281
1282 spin_lock_irqsave(&iommu->register_lock, flag);
1283 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1284 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1285 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1286 spin_unlock_irqrestore(&iommu->register_lock, flag);
1287}
1288
1289static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1290 u8 fault_reason, u16 source_id, unsigned long long addr)
1291{
1292 const char *reason;
9d783ba0 1293 int fault_type;
0ac2491f 1294
9d783ba0 1295 reason = dmar_get_fault_reason(fault_reason, &fault_type);
0ac2491f 1296
1297 if (fault_type == INTR_REMAP)
1298 printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] "
1299 "fault index %llx\n"
1300 "INTR-REMAP:[fault reason %02d] %s\n",
1301 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1302 PCI_FUNC(source_id & 0xFF), addr >> 48,
1303 fault_reason, reason);
1304 else
1305 printk(KERN_ERR
1306 "DMAR:[%s] Request device [%02x:%02x.%d] "
1307 "fault addr %llx \n"
1308 "DMAR:[fault reason %02d] %s\n",
1309 (type ? "DMA Read" : "DMA Write"),
1310 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1311 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1312 return 0;
1313}
1314
1315#define PRIMARY_FAULT_REG_LEN (16)
1531a6a6 1316irqreturn_t dmar_fault(int irq, void *dev_id)
1317{
1318 struct intel_iommu *iommu = dev_id;
1319 int reg, fault_index;
1320 u32 fault_status;
1321 unsigned long flag;
1322
1323 spin_lock_irqsave(&iommu->register_lock, flag);
1324 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1325 if (fault_status)
1326 printk(KERN_ERR "DRHD: handling fault status reg %x\n",
1327 fault_status);
1328
1329 /* TBD: ignore advanced fault log currently */
1330 if (!(fault_status & DMA_FSTS_PPF))
9d783ba0 1331 goto clear_rest;
1332
1333 fault_index = dma_fsts_fault_record_index(fault_status);
1334 reg = cap_fault_reg_offset(iommu->cap);
1335 while (1) {
1336 u8 fault_reason;
1337 u16 source_id;
1338 u64 guest_addr;
1339 int type;
1340 u32 data;
1341
1342 /* highest 32 bits */
1343 data = readl(iommu->reg + reg +
1344 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1345 if (!(data & DMA_FRCD_F))
1346 break;
1347
1348 fault_reason = dma_frcd_fault_reason(data);
1349 type = dma_frcd_type(data);
1350
1351 data = readl(iommu->reg + reg +
1352 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1353 source_id = dma_frcd_source_id(data);
1354
1355 guest_addr = dmar_readq(iommu->reg + reg +
1356 fault_index * PRIMARY_FAULT_REG_LEN);
1357 guest_addr = dma_frcd_page_addr(guest_addr);
1358 /* clear the fault */
1359 writel(DMA_FRCD_F, iommu->reg + reg +
1360 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1361
1362 spin_unlock_irqrestore(&iommu->register_lock, flag);
1363
1364 dmar_fault_do_one(iommu, type, fault_reason,
1365 source_id, guest_addr);
1366
1367 fault_index++;
8211a7b5 1368 if (fault_index >= cap_num_fault_regs(iommu->cap))
1369 fault_index = 0;
1370 spin_lock_irqsave(&iommu->register_lock, flag);
1371 }
1372clear_rest:
1373 /* clear all the other faults */
0ac2491f 1374 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
9d783ba0 1375 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1376
1377 spin_unlock_irqrestore(&iommu->register_lock, flag);
1378 return IRQ_HANDLED;
1379}
1380
1381int dmar_set_interrupt(struct intel_iommu *iommu)
1382{
1383 int irq, ret;
1384
1385 /*
1386 * Check if the fault interrupt is already initialized.
1387 */
1388 if (iommu->irq)
1389 return 0;
1390
1391 irq = create_irq();
1392 if (!irq) {
1393 printk(KERN_ERR "IOMMU: no free vectors\n");
1394 return -EINVAL;
1395 }
1396
1397 set_irq_data(irq, iommu);
1398 iommu->irq = irq;
1399
1400 ret = arch_setup_dmar_msi(irq);
1401 if (ret) {
1402 set_irq_data(irq, NULL);
1403 iommu->irq = 0;
1404 destroy_irq(irq);
dd726435 1405 return ret;
1406 }
1407
0ac2491f
SS
1408 ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
1409 if (ret)
1410 printk(KERN_ERR "IOMMU: can't request irq\n");
1411 return ret;
1412}
1413
1414int __init enable_drhd_fault_handling(void)
1415{
1416 struct dmar_drhd_unit *drhd;
1417
1418 /*
1419 * Enable fault control interrupt.
1420 */
1421 for_each_drhd_unit(drhd) {
1422 int ret;
1423 struct intel_iommu *iommu = drhd->iommu;
1424 ret = dmar_set_interrupt(iommu);
1425
1426 if (ret) {
1427 printk(KERN_ERR "DRHD %Lx: failed to enable fault, "
1428 " interrupt, ret %d\n",
1429 (unsigned long long)drhd->reg_base_addr, ret);
1430 return -1;
1431 }
1432 }
1433
1434 return 0;
1435}
1436
1437/*
1438 * Re-enable Queued Invalidation interface.
1439 */
1440int dmar_reenable_qi(struct intel_iommu *iommu)
1441{
1442 if (!ecap_qis(iommu->ecap))
1443 return -ENOENT;
1444
1445 if (!iommu->qi)
1446 return -ENOENT;
1447
1448 /*
1449 * First disable queued invalidation.
1450 */
1451 dmar_disable_qi(iommu);
1452 /*
1453 * Then enable queued invalidation again. Since there is no pending
1454 * invalidation requests now, it's safe to re-enable queued
1455 * invalidation.
1456 */
1457 __dmar_enable_qi(iommu);
1458
1459 return 0;
1460}
1461
1462/*
1463 * Check interrupt remapping support in DMAR table description.
1464 */
0b8973a8 1465int __init dmar_ir_support(void)
1466{
1467 struct acpi_table_dmar *dmar;
1468 dmar = (struct acpi_table_dmar *)dmar_tbl;
1469 return dmar->flags & 0x1;
1470}