/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"
/*
 * No locks are needed: the DMA remapping hardware unit list is
 * constructed at boot time, and hotplug of these units is not
 * supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * Add INCLUDE_ALL units at the tail, so that a scan of the list
         * finds them last.
         */
        if (drhd->include_all)
                list_add_tail(&drhd->list, &dmar_drhd_units);
        else
                list_add(&drhd->list, &dmar_drhd_units);
}
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
                                           struct pci_dev **dev, u16 segment)
{
        struct pci_bus *bus;
        struct pci_dev *pdev = NULL;
        struct acpi_dmar_pci_path *path;
        int count;

        bus = pci_find_bus(segment, scope->bus);
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (count) {
                if (pdev)
                        pci_dev_put(pdev);
                /*
                 * Some BIOSes list non-existent devices in the DMAR table;
                 * just ignore them.
                 */
                if (!bus) {
                        pr_warn("Device scope bus [%d] not found\n", scope->bus);
                        break;
                }
                pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
                if (!pdev) {
                        /* warning will be printed below */
                        break;
                }
                path++;
                count--;
                bus = pdev->subordinate;
        }
        if (!pdev) {
                pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
                        segment, scope->bus, path->device, path->function);
                return 0;
        }
        if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
             pdev->subordinate) ||
            (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
             !pdev->subordinate)) {
                pr_warn("Device scope type does not match for %s\n",
                        pci_name(pdev));
                pci_dev_put(pdev);
                return -EINVAL;
        }
        *dev = pdev;
        return 0;
}
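/*
 * Illustrative sketch, not part of the driver: a device scope entry names a
 * start bus plus a chain of (device, function) path elements that is walked
 * bridge by bridge.  For a hypothetical endpoint one bridge deep, the loop
 * above reduces to:
 *
 *      bus  = pci_find_bus(segment, scope->bus);
 *      pdev = pci_get_slot(bus, PCI_DEVFN(path[0].device, path[0].function));
 *      bus  = pdev->subordinate;       (descend below the bridge)
 *      pdev = pci_get_slot(bus, PCI_DEVFN(path[1].device, path[1].function));
 */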
int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
                                struct pci_dev ***devices, u16 segment)
{
        struct acpi_dmar_device_scope *scope;
        void *tmp = start;
        int index;
        int ret;

        *cnt = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        (*cnt)++;
                else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
                         scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
                        pr_warn("Unsupported device scope\n");
                }
                start += scope->length;
        }
        if (*cnt == 0)
                return 0;

        *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
        if (!*devices)
                return -ENOMEM;

        start = tmp;
        index = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
                        ret = dmar_parse_one_dev_scope(scope,
                                                       &(*devices)[index],
                                                       segment);
                        if (ret) {
                                dmar_free_dev_scope(devices, cnt);
                                return ret;
                        }
                        index++;
                }
                start += scope->length;
        }

        return 0;
}
void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
{
        if (*devices && *cnt) {
                while (--*cnt >= 0)
                        pci_dev_put((*devices)[*cnt]);
                kfree(*devices);
                *devices = NULL;
                *cnt = 0;
        }
}
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;

        dmaru->hdr = header;
        dmaru->reg_base_addr = drhd->address;
        dmaru->segment = drhd->segment;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

        ret = alloc_iommu(dmaru);
        if (ret) {
                kfree(dmaru);
                return ret;
        }
        dmar_register_drhd_unit(dmaru);
        return 0;
}
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
        struct acpi_dmar_hardware_unit *drhd;
        int ret;

        drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

        if (dmaru->include_all)
                return 0;

        ret = dmar_parse_dev_scope((void *)(drhd + 1),
                                   ((void *)drhd) + drhd->header.length,
                                   &dmaru->devices_cnt, &dmaru->devices,
                                   drhd->segment);
        if (ret) {
                list_del(&dmaru->list);
                kfree(dmaru);
        }
        return ret;
}
#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
        struct acpi_dmar_rhsa *rhsa;
        struct dmar_drhd_unit *drhd;

        rhsa = (struct acpi_dmar_rhsa *)header;
        for_each_drhd_unit(drhd) {
                if (drhd->reg_base_addr == rhsa->base_address) {
                        int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

                        if (!node_online(node))
                                node = -1;
                        drhd->iommu->node = node;
                        return 0;
                }
        }
        WARN_TAINT(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                rhsa->base_address,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));

        return 0;
}
#endif
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;
        struct acpi_dmar_atsr *atsr;
        struct acpi_dmar_rhsa *rhsa;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = container_of(header, struct acpi_dmar_hardware_unit,
                                    header);
                pr_info("DRHD base: %#016Lx flags: %#x\n",
                        (unsigned long long)drhd->address, drhd->flags);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = container_of(header, struct acpi_dmar_reserved_memory,
                                    header);
                pr_info("RMRR base: %#016Lx end: %#016Lx\n",
                        (unsigned long long)rmrr->base_address,
                        (unsigned long long)rmrr->end_address);
                break;
        case ACPI_DMAR_TYPE_ATSR:
                atsr = container_of(header, struct acpi_dmar_atsr, header);
                pr_info("ATSR flags: %#x\n", atsr->flags);
                break;
        case ACPI_DMAR_HARDWARE_AFFINITY:
                rhsa = container_of(header, struct acpi_dmar_rhsa, header);
                pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
                        (unsigned long long)rhsa->base_address,
                        rhsa->proximity_domain);
                break;
        }
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
        acpi_status status = AE_OK;

        /* If we can find the DMAR table, then there are DMAR devices. */
        status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
                                (struct acpi_table_header **)&dmar_tbl,
                                &dmar_tbl_size);

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                pr_warn("Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }

        return (ACPI_SUCCESS(status) ? 1 : 0);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        int ret = 0;
        int drhd_count = 0;

        /*
         * Do it again; the earlier dmar_tbl mapping could have been done
         * with a fixed map.
         */
        dmar_table_detect();

        /*
         * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
         * SINIT saved in SinitMleData in the TXT heap (which is DMA protected).
         */
        dmar_tbl = tboot_get_dmar_table(dmar_tbl);

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;

        if (dmar->width < PAGE_SHIFT - 1) {
                pr_warn("Invalid DMAR haw\n");
                return -EINVAL;
        }

        pr_info("Host address width %d\n", dmar->width + 1);

        entry_header = (struct acpi_dmar_header *)(dmar + 1);
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        pr_warn("Invalid 0-length structure\n");
                        ret = -EINVAL;
                        break;
                }

                dmar_table_print_dmar_entry(entry_header);

                switch (entry_header->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                        drhd_count++;
                        ret = dmar_parse_one_drhd(entry_header);
                        break;
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                        ret = dmar_parse_one_rmrr(entry_header);
                        break;
                case ACPI_DMAR_TYPE_ATSR:
                        ret = dmar_parse_one_atsr(entry_header);
                        break;
                case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
                        ret = dmar_parse_one_rhsa(entry_header);
#endif
                        break;
                default:
                        pr_warn("Unknown DMAR structure type %d\n",
                                entry_header->type);
                        ret = 0; /* for forward compatibility */
                        break;
                }
                if (ret)
                        break;

                entry_header = ((void *)entry_header + entry_header->length);
        }
        if (drhd_count == 0)
                pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
        return ret;
}
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
                                 struct pci_dev *dev)
{
        int index;

        while (dev) {
                for (index = 0; index < cnt; index++)
                        if (dev == devices[index])
                                return 1;

                /* Check our parent */
                dev = dev->bus->self;
        }

        return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
        struct dmar_drhd_unit *dmaru = NULL;
        struct acpi_dmar_hardware_unit *drhd;

        dev = pci_physfn(dev);

        for_each_drhd_unit(dmaru) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))
                        return dmaru;

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
                        return dmaru;
        }

        return NULL;
}
int __init dmar_dev_scope_init(void)
{
        static int dmar_dev_scope_initialized;
        struct dmar_drhd_unit *drhd, *drhd_n;
        int ret = 0;

        if (dmar_dev_scope_initialized)
                return dmar_dev_scope_initialized;

        if (list_empty(&dmar_drhd_units))
                goto fail;

        list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
                ret = dmar_parse_dev(drhd);
                if (ret)
                        goto fail;
        }

        ret = dmar_parse_rmrr_atsr_dev();
        if (ret)
                goto fail;

        dmar_dev_scope_initialized = 1;
        return 0;

fail:
        dmar_dev_scope_initialized = ret;
        return ret;
}
int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized)
                return 0;

        dmar_table_initialized = 1;

        ret = parse_dmar_table();
        if (ret) {
                if (ret != -ENODEV)
                        pr_info("parse DMAR table failure.\n");
                return ret;
        }

        if (list_empty(&dmar_drhd_units)) {
                pr_info("No DMAR devices found\n");
                return -ENODEV;
        }

        return 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
{
        WARN_TAINT_ONCE(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; DMAR reported at address %llx%s!\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                addr, message,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));
}
static int __init check_zero_address(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        struct acpi_dmar_hardware_unit *drhd;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        entry_header = (struct acpi_dmar_header *)(dmar + 1);

        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        pr_warn("Invalid 0-length structure\n");
                        return 0;
                }

                if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
                        void __iomem *addr;
                        u64 cap, ecap;

                        drhd = (void *)entry_header;
                        if (!drhd->address) {
                                warn_invalid_dmar(0, "");
                                goto failed;
                        }

                        addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
                        if (!addr) {
                                printk("IOMMU: can't validate: %llx\n",
                                       drhd->address);
                                goto failed;
                        }
                        cap = dmar_readq(addr + DMAR_CAP_REG);
                        ecap = dmar_readq(addr + DMAR_ECAP_REG);
                        early_iounmap(addr, VTD_PAGE_SIZE);
                        if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
                                warn_invalid_dmar(drhd->address,
                                                  " returns all ones");
                                goto failed;
                        }
                }

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return 1;

failed:
        return 0;
}
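/*
 * Illustrative sketch, not part of the driver: every DMAR remapping
 * structure starts with a common (type, length) header, so the loops in
 * parse_dmar_table() and check_zero_address() above are instances of the
 * generic variable-length subtable walk:
 *
 *      struct acpi_dmar_header *hdr = (struct acpi_dmar_header *)(dmar + 1);
 *      void *end = (void *)dmar + dmar_tbl->length;
 *
 *      while ((void *)hdr < end && hdr->length) {
 *              handle(hdr->type, hdr);         (dispatch on subtable type)
 *              hdr = (void *)hdr + hdr->length;
 *      }
 *
 * where handle() stands in for the per-type switch statements above.
 */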
int __init detect_intel_iommu(void)
{
        int ret;

        ret = dmar_table_detect();
        if (ret)
                ret = check_zero_address();

        if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
                iommu_detected = 1;
                /* Make sure ACS will be enabled */
                pci_request_acs();
        }

#ifdef CONFIG_X86
        if (ret)
                x86_init.iommu.iommu_init = intel_iommu_init;
#endif

        early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;

        return ret ? 1 : -ENODEV;
}
static void unmap_iommu(struct intel_iommu *iommu)
{
        iounmap(iommu->reg);
        release_mem_region(iommu->reg_phys, iommu->reg_size);
}
/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start with a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
        int map_size, err = 0;

        iommu->reg_phys = phys_addr;
        iommu->reg_size = VTD_PAGE_SIZE;

        if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
                pr_err("IOMMU: can't reserve memory\n");
                err = -EBUSY;
                goto out;
        }

        iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
        if (!iommu->reg) {
                pr_err("IOMMU: can't map the region\n");
                err = -ENOMEM;
                goto release;
        }

        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
                err = -EINVAL;
                warn_invalid_dmar(phys_addr, " returns all ones");
                goto unmap;
        }

        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                         cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > iommu->reg_size) {
                iounmap(iommu->reg);
                release_mem_region(iommu->reg_phys, iommu->reg_size);
                iommu->reg_size = map_size;
                if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
                                        iommu->name)) {
                        pr_err("IOMMU: can't reserve memory\n");
                        err = -EBUSY;
                        goto out;
                }
                iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
                if (!iommu->reg) {
                        pr_err("IOMMU: can't map the region\n");
                        err = -ENOMEM;
                        goto release;
                }
        }
        err = 0;
        goto out;

unmap:
        iounmap(iommu->reg);
release:
        release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
        return err;
}
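/*
 * Illustrative note, not part of the driver: the final window size depends
 * on capability fields that can only be read through an initial one-page
 * mapping, hence the map, read CAP/ECAP, then conditionally remap sequence
 * above.  For example, if cap_max_fault_reg_offset() reported 0x2000, the
 * initial VTD_PAGE_SIZE (0x1000) mapping would be torn down and
 * re-established with a 0x2000 size.
 */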
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        u32 ver, sts;
        static int iommu_allocated = 0;
        int agaw = 0;
        int msagaw = 0;
        int err;

        if (!drhd->reg_base_addr) {
                warn_invalid_dmar(0, "");
                return -EINVAL;
        }

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->seq_id = iommu_allocated++;
        sprintf(iommu->name, "dmar%d", iommu->seq_id);

        err = map_iommu(iommu, drhd->reg_base_addr);
        if (err) {
                pr_err("IOMMU: failed to map %s\n", iommu->name);
                goto error;
        }

        err = -EINVAL;
        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        msagaw = iommu_calculate_max_sagaw(iommu);
        if (msagaw < 0) {
                pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;

        iommu->node = -1;

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
                iommu->seq_id,
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
                (unsigned long long)iommu->ecap);

        /* Reflect the status in gcmd */
        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (sts & DMA_GSTS_IRES)
                iommu->gcmd |= DMA_GCMD_IRE;
        if (sts & DMA_GSTS_TES)
                iommu->gcmd |= DMA_GCMD_TE;
        if (sts & DMA_GSTS_QIES)
                iommu->gcmd |= DMA_GCMD_QIE;

        raw_spin_lock_init(&iommu->register_lock);

        drhd->iommu = iommu;
        return 0;

err_unmap:
        unmap_iommu(iommu);
error:
        kfree(iommu);
        return err;
}
void free_iommu(struct intel_iommu *iommu)
{
        if (!iommu)
                return;

        free_dmar_iommu(iommu);

        if (iommu->reg)
                unmap_iommu(iommu);

        kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_DONE ||
               qi->desc_status[qi->free_tail] == QI_ABORT) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}
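/*
 * Illustrative sketch, not part of the driver: the invalidation queue is a
 * ring of QI_LENGTH descriptors (256, i.e. one 4KiB page of 16-byte
 * descriptors), so all index arithmetic is modular.  E.g. reclaiming the
 * last slot wraps the tail back to the start:
 *
 *      free_tail = (255 + 1) % QI_LENGTH = 0
 *
 * and every reclaimed slot returns one entry to free_cnt.
 */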
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
        u32 fault;
        int head, tail;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        if (qi->desc_status[wait_index] == QI_ABORT)
                return -EAGAIN;

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         * is cleared.
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> DMAR_IQ_SHIFT) == index) {
                        pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
                               (unsigned long long)qi->desc[index].low,
                               (unsigned long long)qi->desc[index].high);
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                               sizeof(struct qi_desc));
                        __iommu_flush_cache(iommu, &qi->desc[index],
                                            sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
        }

        /*
         * If ITE happens, all pending wait_desc commands are aborted.
         * No new descriptors are fetched until the ITE is cleared.
         */
        if (fault & DMA_FSTS_ITE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
                head |= 1;
                tail = readl(iommu->reg + DMAR_IQT_REG);
                tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

                writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

                do {
                        if (qi->desc_status[head] == QI_IN_USE)
                                qi->desc_status[head] = QI_ABORT;
                        head = (head - 2 + QI_LENGTH) % QI_LENGTH;
                } while (head != tail);

                if (qi->desc_status[wait_index] == QI_ABORT)
                        return -EAGAIN;
        }

        if (fault & DMA_FSTS_ICE)
                writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

        return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        int rc;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return 0;

        hw = qi->desc;

restart:
        rc = 0;

        raw_spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                raw_spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                raw_spin_lock_irqsave(&qi->q_lock, flags);
        }

        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

        hw[index] = *desc;

        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
        __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        /*
         * Update the HW tail register indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We leave the interrupts disabled to prevent the interrupt
                 * context from queueing another cmd while a cmd is already
                 * submitted and waiting for completion on this cpu. This is
                 * to avoid a deadlock where the interrupt context can wait
                 * indefinitely for free slots in the queue.
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
                        break;

                raw_spin_unlock(&qi->q_lock);
                cpu_relax();
                raw_spin_lock(&qi->q_lock);
        }

        qi->desc_status[index] = QI_DONE;

        reclaim_free_desc(qi);
        raw_spin_unlock_irqrestore(&qi->q_lock, flags);

        if (rc == -EAGAIN)
                goto restart;

        return rc;
}
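/*
 * Illustrative sketch, not part of the driver: a caller builds a raw
 * descriptor and submits it synchronously; qi_global_iec() below is the
 * simplest real user.  An open-coded equivalent would be:
 *
 *      struct qi_desc desc;
 *
 *      desc.low  = QI_IEC_TYPE;
 *      desc.high = 0;
 *      qi_submit_sync(&desc, iommu);
 *
 * Note that each submission consumes two ring slots: the caller's
 * descriptor plus the wait descriptor used to detect completion.
 */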
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
}
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                      u64 type)
{
        struct qi_desc desc;

        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                    unsigned int size_order, u64 type)
{
        u8 dw = 0, dr = 0;
        struct qi_desc desc;
        int ih = 0;

        if (cap_write_drain(iommu->cap))
                dw = 1;

        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        qi_submit_sync(&desc, iommu);
}
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
                        u64 addr, unsigned mask)
{
        struct qi_desc desc;

        if (mask) {
                BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
                addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
                desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
                desc.high = QI_DEV_IOTLB_ADDR(addr);

        if (qdep >= QI_DEV_IOTLB_MAX_INVS)
                qdep = 0;

        desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
                   QI_DEV_IOTLB_TYPE;

        qi_submit_sync(&desc, iommu);
}
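/*
 * Illustrative worked example, not part of the driver: the invalidation
 * size is folded into the low address bits.  With VTD_PAGE_SHIFT = 12 and
 * mask = 2 (a 16KiB span), the code above checks
 *
 *      addr & ((1 << 14) - 1) == 0     (addr must be 16KiB aligned)
 *
 * and then sets bits [12:0] via addr |= (1 << 13) - 1, so the position of
 * the lowest zero bit tells the hardware how large the span is.
 */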
/*
 * Disable the Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;
        cycles_t start_time = get_cycles();

        if (!ecap_qis(iommu->ecap))
                return;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))
                goto end;

        /*
         * Give the HW a chance to complete the pending invalidation requests.
         */
        while ((readl(iommu->reg + DMAR_IQT_REG) !=
                readl(iommu->reg + DMAR_IQH_REG)) &&
               (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
                cpu_relax();

        iommu->gcmd &= ~DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                      !(sts & DMA_GSTS_QIES), sts);
end:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;
        struct q_inval *qi = iommu->qi;

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        iommu->gcmd |= DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure the hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable the Queued Invalidation interface. This is a must to support
 * interrupt-remapping. It is also used by DMA-remapping, which replaces
 * register-based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
        struct q_inval *qi;
        struct page *desc_page;

        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        /*
         * Queued invalidation is already set up and enabled.
         */
        if (iommu->qi)
                return 0;

        iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
        if (!iommu->qi)
                return -ENOMEM;

        qi = iommu->qi;

        desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (!desc_page) {
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->desc = page_address(desc_page);

        qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_init(&qi->q_lock);

        __dmar_enable_qi(iommu);

        return 0;
}
/* iommu interrupt handling. Most of this is MSI-like. */

enum faulttype {
        DMA_REMAP,
        INTR_REMAP,
        UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
        "Software",
        "Present bit in root entry is clear",
        "Present bit in context entry is clear",
        "Invalid context entry",
        "Access beyond MGAW",
        "PTE Write access is not set",
        "PTE Read access is not set",
        "Next page table ptr is invalid",
        "Root table address invalid",
        "Context table ptr is invalid",
        "non-zero reserved fields in RTP",
        "non-zero reserved fields in CTP",
        "non-zero reserved fields in PTE",
        "PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
        "Detected reserved fields in the decoded interrupt-remapped request",
        "Interrupt index exceeded the interrupt-remapping table size",
        "Present field in the IRTE entry is clear",
        "Error accessing interrupt-remapping table pointed by IRTA_REG",
        "Detected reserved fields in the IRTE entry",
        "Blocked a compatibility format interrupt request",
        "Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
        if (fault_reason >= 0x20 && (fault_reason - 0x20 <
                                     ARRAY_SIZE(irq_remap_fault_reasons))) {
                *fault_type = INTR_REMAP;
                return irq_remap_fault_reasons[fault_reason - 0x20];
        } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
                *fault_type = DMA_REMAP;
                return dma_remap_fault_reasons[fault_reason];
        } else {
                *fault_type = UNKNOWN;
                return "Unknown";
        }
}
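/*
 * Illustrative sketch, not part of the driver: reason codes 0x20 and up
 * index the interrupt-remapping table; everything below indexes the DMA
 * remapping table.  E.g. a hardware fault_reason of 0x25 yields
 *
 *      irq_remap_fault_reasons[0x25 - 0x20]
 *              -> "Blocked a compatibility format interrupt request"
 *
 * with *fault_type set to INTR_REMAP.
 */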
void dmar_msi_unmask(struct irq_data *data)
{
        struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
        unsigned long flag;

        /* unmask it */
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(0, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_mask(struct irq_data *data)
{
        struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
        unsigned long flag;

        /* mask it */
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_write(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = irq_get_handler_data(irq);
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
        writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
        writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_read(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = irq_get_handler_data(irq);
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
        msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
        msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
                             u8 fault_reason, u16 source_id,
                             unsigned long long addr)
{
        const char *reason;
        int fault_type;

        reason = dmar_get_fault_reason(fault_reason, &fault_type);

        if (fault_type == INTR_REMAP)
                pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
                       "fault index %llx\n"
                       "INTR-REMAP:[fault reason %02d] %s\n",
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr >> 48,
                       fault_reason, reason);
        else
                pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
                       "fault addr %llx\n"
                       "DMAR:[fault reason %02d] %s\n",
                       (type ? "DMA Read" : "DMA Write"),
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
        return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
        struct intel_iommu *iommu = dev_id;
        int reg, fault_index;
        u32 fault_status;
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        if (fault_status)
                pr_err("DRHD: handling fault status reg %x\n", fault_status);

        /* TBD: ignore advanced fault log currently */
        if (!(fault_status & DMA_FSTS_PPF))
                goto unlock_exit;

        fault_index = dma_fsts_fault_record_index(fault_status);
        reg = cap_fault_reg_offset(iommu->cap);
        while (1) {
                u8 fault_reason;
                u16 source_id;
                u64 guest_addr;
                int type;
                u32 data;

                /* highest 32 bits */
                data = readl(iommu->reg + reg +
                             fault_index * PRIMARY_FAULT_REG_LEN + 12);
                if (!(data & DMA_FRCD_F))
                        break;

                fault_reason = dma_frcd_fault_reason(data);
                type = dma_frcd_type(data);

                data = readl(iommu->reg + reg +
                             fault_index * PRIMARY_FAULT_REG_LEN + 8);
                source_id = dma_frcd_source_id(data);

                guest_addr = dmar_readq(iommu->reg + reg +
                                        fault_index * PRIMARY_FAULT_REG_LEN);
                guest_addr = dma_frcd_page_addr(guest_addr);
                /* clear the fault */
                writel(DMA_FRCD_F, iommu->reg + reg +
                       fault_index * PRIMARY_FAULT_REG_LEN + 12);

                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

                dmar_fault_do_one(iommu, type, fault_reason,
                                  source_id, guest_addr);

                fault_index++;
                if (fault_index >= cap_num_fault_regs(iommu->cap))
                        fault_index = 0;
                raw_spin_lock_irqsave(&iommu->register_lock, flag);
        }

        writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
        return IRQ_HANDLED;
}
int dmar_set_interrupt(struct intel_iommu *iommu)
{
        int irq, ret;

        /*
         * Check if the fault interrupt is already initialized.
         */
        if (iommu->irq)
                return 0;

        irq = create_irq();
        if (!irq) {
                pr_err("IOMMU: no free vectors\n");
                return -EINVAL;
        }

        irq_set_handler_data(irq, iommu);
        iommu->irq = irq;

        ret = arch_setup_dmar_msi(irq);
        if (ret) {
                irq_set_handler_data(irq, NULL);
                iommu->irq = 0;
                destroy_irq(irq);
                return ret;
        }

        ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
        if (ret)
                pr_err("IOMMU: can't request irq\n");
        return ret;
}
int __init enable_drhd_fault_handling(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        /*
         * Enable fault control interrupt.
         */
        for_each_iommu(iommu, drhd) {
                u32 fault_status;
                int ret = dmar_set_interrupt(iommu);

                if (ret) {
                        pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
                               (unsigned long long)drhd->reg_base_addr, ret);
                        return -1;
                }

                /*
                 * Clear any previous faults.
                 */
                dmar_fault(iommu->irq, iommu);
                fault_status = readl(iommu->reg + DMAR_FSTS_REG);
                writel(fault_status, iommu->reg + DMAR_FSTS_REG);
        }

        return 0;
}
/*
 * Re-enable the Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        if (!iommu->qi)
                return -ENOENT;

        /*
         * First disable queued invalidation.
         */
        dmar_disable_qi(iommu);
        /*
         * Then enable queued invalidation again. Since there are no
         * pending invalidation requests now, it's safe to re-enable it.
         */
        __dmar_enable_qi(iommu);

        return 0;
}
/*
 * Check interrupt remapping support in the DMAR table description.
 */
int __init dmar_ir_support(void)
{
        struct acpi_table_dmar *dmar;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return 0;
        return dmar->flags & 0x1;
}

IOMMU_INIT_POST(detect_intel_iommu);