/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI table.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"
/*
 * No locks are needed, as the DMA remapping hardware unit
 * list is constructed at boot time, and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
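
/*
 * Illustrative note (not part of the original file): because the list
 * above is append-only at boot and never modified afterwards, consumers
 * may walk it without locking, e.g. a minimal sketch:
 *
 *	struct dmar_drhd_unit *drhd;
 *
 *	for_each_drhd_unit(drhd) {
 *		if (drhd->include_all)
 *			break;
 *	}
 */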
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so that a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
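
/*
 * Illustrative sketch (not from the original file): with INCLUDE_ALL
 * units at the tail, a matching walk such as the one in
 * dmar_find_matched_drhd_unit() below reaches a catch-all unit only
 * after every explicit device scope has been checked, roughly:
 *
 *	for_each_drhd_unit(dmaru) {
 *		if (device_in_scope(dmaru, dev))	// hypothetical helper
 *			return dmaru;			// explicit match first
 *		if (dmaru->include_all)
 *			return dmaru;			// catch-all seen last
 *	}
 */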
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR
		 * table; just ignore them.
		 */
		if (!bus) {
			pr_warn("Device scope bus [%d] not found\n", scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
		if (!pdev) {
			/* warning will be printed below */
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->device, path->function);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		pr_warn("Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
					&(*devices)[index], segment);
			if (ret) {
				dmar_free_dev_scope(devices, cnt);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}
void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
{
	if (*devices && *cnt) {
		while (--*cnt >= 0)
			pci_dev_put((*devices)[*cnt]);
		kfree(*devices);
		*devices = NULL;
		*cnt = 0;
	}
}
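
/*
 * Illustrative note (not in the original file): dmar_parse_one_dev_scope()
 * takes a reference on each device it records via pci_get_slot(), and
 * dmar_free_dev_scope() is the matching release path. A minimal sketch
 * of the ownership protocol:
 *
 *	struct pci_dev **devs;
 *	int cnt;
 *
 *	if (!dmar_parse_dev_scope(start, end, &cnt, &devs, segment)) {
 *		// ... use devs[0..cnt-1] ...
 *		dmar_free_dev_scope(&devs, &cnt);	// drops the refs
 *	}
 */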
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	/* An INCLUDE_ALL unit carries no explicit device scope. */
	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}
#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif
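
/*
 * Illustrative note (not part of the original file): binding an IOMMU to
 * a NUMA node here is what later allows node-local allocations for that
 * unit, e.g. the descriptor page in dmar_enable_qi() below:
 *
 *	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
 */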
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices. */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
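
/*
 * Illustrative note (not in the original file): the 1/0 return above is
 * a boolean "DMAR table present" answer, consumed by detect_intel_iommu()
 * further down, roughly:
 *
 *	ret = dmar_table_detect();
 *	if (ret)
 *		ret = check_zero_address();
 */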
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have been made
	 * through the fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
	 * SINIT saved in SinitMleData in the TXT heap (which is DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}
int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			pr_info("parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		pr_info("No DMAR devices found\n");
		return -ENODEV;
	}

	return 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}
static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				pr_err("IOMMU: can't validate: %llx\n",
				       drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}
int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
		struct acpi_table_dmar *dmar;

		dmar = (struct acpi_table_dmar *)dmar_tbl;

		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
		    dmar->flags & 0x1)
			pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}

#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}
static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}
/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start with a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
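
/*
 * Illustrative note (not part of the original file): the two-pass mapping
 * above exists because the register-set size is only known from the
 * capability registers, which themselves must be read through an initial
 * mapping. The sequence is: map one VTD_PAGE_SIZE page, read cap/ecap,
 * compute the real size via ecap_max_iotlb_offset() and
 * cap_max_fault_reg_offset(), then remap if VTD_PAGE_ALIGN(map_size)
 * exceeds the first mapping.
 */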
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	unmap_iommu(iommu);
error:
	kfree(iommu);
	return err;
}
void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		unmap_iommu(iommu);
	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors that have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
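
/*
 * Illustrative sketch (not from the original file): the invalidation
 * queue is a fixed ring of QI_LENGTH descriptors tracked by free_head
 * (next slot to hand out), free_tail (oldest slot not yet reclaimed)
 * and free_cnt. Reclaim simply advances free_tail over finished slots:
 *
 *	free_tail -> [QI_DONE][QI_ABORT][QI_IN_USE] ... <- free_head
 *	             reclaimed here; stops at the first busy slot
 */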
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * Update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent the
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
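
/*
 * Illustrative usage sketch (not part of the original file): every
 * invalidation helper below follows the same pattern - build one
 * qi_desc and hand it to qi_submit_sync(). For a hypothetical global
 * context-cache flush, that would look roughly like:
 *
 *	struct qi_desc desc;
 *
 *	desc.low = QI_CC_GRAN(DMA_CCMD_GLOBAL_INVL) | QI_CC_TYPE;
 *	desc.high = 0;
 *	qi_submit_sync(&desc, iommu);
 */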
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}
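
/*
 * Illustrative note (not in the original file): size_order is the
 * order, in 4KiB pages, of the region to flush. Assuming a 2MiB
 * page-selective flush, a caller would pass size_order = 9, since
 * 2^9 * 4KiB = 2MiB, e.g.:
 *
 *	qi_flush_iotlb(iommu, did, addr & VTD_PAGE_MASK, 9,
 *		       DMA_TLB_PSI_FLUSH);
 */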
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DEV_IOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the HW a chance to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * Queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long)qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
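
/*
 * Illustrative usage sketch (not from the original file): a caller
 * bringing up an IOMMU would enable the queue once and can then issue
 * invalidations from any context, e.g.:
 *
 *	if (dmar_enable_qi(iommu))
 *		pr_warn("dmar%d: failed to enable queued invalidation\n",
 *			iommu->seq_id);
 *	else
 *		qi_global_iec(iommu);	// flush the interrupt entry cache
 */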
/* iommu interrupt handling. Most of this is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
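
/*
 * Illustrative note (not part of the original file): each primary fault
 * record is PRIMARY_FAULT_REG_LEN (16) bytes, starting at
 * cap_fault_reg_offset(iommu->cap), laid out roughly as:
 *
 *	offset  0: faulting page address (64 bits)
 *	offset  8: source-id (in the low 32 bits)
 *	offset 12: F bit, type and fault reason (high 32 bits)
 *
 * which is why the loop above reads "+ 12" first to test DMA_FRCD_F.
 */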
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		u32 fault_status;
		struct intel_iommu *iommu = drhd->iommu;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}
/*
 * Check interrupt remapping support in the DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}

IOMMU_INIT_POST(detect_intel_iommu);