/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */
#define pr_fmt(fmt)     "DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
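
/*
 * Worked example (illustrative note, not part of the original code): with
 * the default 48-bit guest address width, __DOMAIN_MAX_PFN(48) is
 * (1 << (48 - 12)) - 1 = 2^36 - 1 = 0xFFFFFFFFF. That fits in an unsigned
 * long on 64-bit, so DOMAIN_MAX_PFN(48) is the same value and
 * DOMAIN_MAX_ADDR(48) is 0xFFFFFFFFF000.
 */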
/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
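
/*
 * Illustration (added note, not original code): ~0xFFFUL sets every bit
 * from 12 upwards, so the IOMMU core sees 4KiB, 8KiB, 16KiB, ... i.e.
 * every power-of-two multiple of 4KiB, as a supported page size.
 */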
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
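
/*
 * Worked example for the helpers above (illustrative only): a 48-bit
 * address width gives width_to_agaw(48) = DIV_ROUND_UP(48 - 30, 9) = 2,
 * agaw_to_level(2) = 4 (a 4-level page table), and
 * lvl_to_nr_pages(2) = 1 << 9 = 512 4KiB pages per 2MiB superpage.
 */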
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
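
/*
 * Note (added for clarity): on x86, PAGE_SHIFT and VTD_PAGE_SHIFT are both
 * 12, so dma_to_mm_pfn()/mm_to_dma_pfn() shift by zero and are identity
 * conversions; they only matter when MM pages are larger than 4KiB.
 */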
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	lo;
	u64	hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
	if (!(re->lo & 1))
		return 0;

	return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
	if (!(re->hi & 1))
		return 0;

	return re->hi & VTD_PAGE_MASK;
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};
static inline void context_clear_pasid_enable(struct context_entry *context)
{
	context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
	return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
	context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
	return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline bool context_present(struct context_entry *context)
{
	return context_pasid_enabled(context) ?
	     __context_present(context) :
	     __context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
	return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}
static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
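
/*
 * Note (added for clarity): a page-table page holds 512 8-byte PTEs, so
 * first_pte_in_page() is simply "are the low 12 bits of the pointer
 * zero", i.e. is this PTE at index 0 within its 4KiB page.
 */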
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses*/

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)
static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static long list_size;
static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;

/* We only actually use ECS when PASID support (on the new bit 40)
 * is also advertised. Some early implementations — the ones with
 * PASID support on bit 28 — have issues even when we *only* use
 * extended root/context tables. */
#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
			    ecap_pasid(iommu->ecap))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

/* Convert generic struct iommu_domain to private struct dmar_domain */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_info("Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "ecs_off", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable extended context table support\n");
			intel_iommu_ecs = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
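
/*
 * Usage example (illustrative): booting with "intel_iommu=on,strict" is
 * parsed by the loop above one comma-separated token at a time, enabling
 * the IOMMU and disabling batched IOTLB flushing.
 */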
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
					unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw and fall
 * back to a smaller supported agaw for iommus that don't support the
 * default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
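
/*
 * Worked example (based on the VT-d SAGAW encoding; illustrative
 * assumption, not original code): a unit whose capability reports
 * SAGAW = 0x4 (only bit 2 set) supports only 4-level tables.
 * __iommu_calculate_agaw() starts at width_to_agaw(48) = 2 and walks
 * down until it finds a set bit, so it returns 2 here.
 */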
/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool found = false;
	int i;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		found = true;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}
static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
						       u8 bus, u8 devfn, int alloc)
{
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;
	u64 *entry;

	entry = &root->lo;
	if (ecs_enabled(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;
		}
		devfn *= 2;
	}
	if (*entry & 1)
		context = phys_to_virt(*entry & VTD_PAGE_MASK);
	else {
		unsigned long phy_addr;
		if (!alloc)
			return NULL;

		context = alloc_pgtable_page(iommu->node);
		if (!context)
			return NULL;

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));
	}
	return &context[devfn];
}

static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (iommu_dummy(dev))
		return NULL;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context)
		ret = context_present(context);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context) {
		context_clear_entry(context);
		__iommu_flush_cache(iommu, context, sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
		if (context)
			free_pgtable_page(context);

		if (!ecs_enabled(iommu))
			continue;

		context = iommu_context_addr(iommu, i, 0x80, 0);
		if (context)
			free_pgtable_page(context);

	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}
/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}
static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}
/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("Allocating root entry for %s failed\n",
			iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	u64 addr;
	u32 sts;
	unsigned long flag;

	addr = virt_to_phys(iommu->root_entry);
	if (ecs_enabled(iommu))
		addr |= DMA_RTADDR_RTT;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		pr_err("Flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("TLB flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
static struct device_domain_info *
iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
			 u8 bus, u8 devfn)
{
	bool found = false;
	unsigned long flags;
	struct device_domain_info *info;
	struct pci_dev *pdev;

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			found = true;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev || !dev_is_pci(info->dev))
		return NULL;

	pdev = to_pci_dev(info->dev);

	if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(pdev))
		return NULL;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info || !dev_is_pci(info->dev))
		return;

	pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !dev_is_pci(info->dev) ||
	    !pci_ats_enabled(to_pci_dev(info->dev)))
		return;

	pci_disable_ats(to_pci_dev(info->dev));
}
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		struct pci_dev *pdev;
		if (!info->dev || !dev_is_pci(info->dev))
			continue;

		pdev = to_pci_dev(info->dev);
		if (!pci_ats_enabled(pdev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(pdev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
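
/*
 * Worked example (illustrative): the source-id is bus << 8 | devfn, so a
 * function at 03:00.0 (bus 3, devfn 0) gets sid 0x300, while 00:1f.3
 * (devfn = (0x1f << 3) | 3 = 0xfb) gets sid 0x00fb.
 */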
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	if (ih)
		ih = 1 << 6;
	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
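
/*
 * Worked example (illustrative): flushing pages = 8 gives
 * mask = ilog2(roundup_pow_of_two(8)) = 3, i.e. a page-selective
 * invalidation covering 2^3 = 8 VT-d pages, with addr naturally aligned
 * to that 32KiB span as the hardware requires.
 */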
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("%s: Number of Domains supported <%ld>\n",
		 iommu->name, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("%s: Allocating domain id array failed\n",
		       iommu->name);
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		pr_err("%s: Allocating domain array failed\n",
		       iommu->name);
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void disable_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			/*
			 * Domain id 0 is reserved for invalid translation
			 * if hardware supports caching mode.
			 */
			if (cap_caching_mode(iommu->cap) && i == 0)
				continue;

			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);
			if (domain_detach_iommu(domain, iommu) == 0 &&
			    !domain_type_is_vm(domain))
				domain_exit(domain);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);
}
static void free_dmar_iommu(struct intel_iommu *iommu)
{
	if ((iommu->domains) && (iommu->domain_ids)) {
		kfree(iommu->domains);
		kfree(iommu->domain_ids);
		iommu->domains = NULL;
		iommu->domain_ids = NULL;
	}

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(int flags)
{
	/* domain id for virtual machine, it won't be set in context */
	static atomic_t vm_domid = ATOMIC_INIT(0);
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(domain, 0, sizeof(*domain));
	domain->nid = -1;
	domain->flags = flags;
	spin_lock_init(&domain->iommu_lock);
	INIT_LIST_HEAD(&domain->devices);
	if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
		domain->id = atomic_inc_return(&vm_domid);

	return domain;
}
static int __iommu_attach_domain(struct dmar_domain *domain,
				 struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;

	ndomains = cap_ndoms(iommu->cap);
	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num < ndomains) {
		set_bit(num, iommu->domain_ids);
		iommu->domains[num] = domain;
	} else {
		num = -ENOSPC;
	}

	return num;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	num = __iommu_attach_domain(domain, iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);
	if (num < 0)
		pr_err("%s: No free domain ids\n", iommu->name);

	return num;
}

static int iommu_attach_vm_domain(struct dmar_domain *domain,
				  struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;

	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains)
		if (iommu->domains[num] == domain)
			return num;

	return __iommu_attach_domain(domain, iommu);
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;

	spin_lock_irqsave(&iommu->lock, flags);
	if (domain_type_is_vm_or_si(domain)) {
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				clear_bit(num, iommu->domain_ids);
				iommu->domains[num] = NULL;
				break;
			}
		}
	} else {
		clear_bit(domain->id, iommu->domain_ids);
		iommu->domains[domain->id] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void domain_attach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
}

static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	unsigned long flags;
	int count = INT_MAX;

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
		count = --domain->iommu_count;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);

	return count;
}
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
			DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		pr_err("Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				pr_err("Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
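
/*
 * Worked example (illustrative): the adjusted width rounds (gaw - 12) up
 * to a multiple of the 9-bit stride, so guestwidth_to_adjustwidth(39) = 39
 * (since (39 - 12) % 9 == 0) while guestwidth_to_adjustwidth(40) = 48.
 */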
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
			DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("Hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	if (intel_iommu_superpage)
		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	else
		domain->iommu_superpage = 0;

	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct page *freelist = NULL;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	/* remove associated devices */
	domain_remove_dev_info(domain);

	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* clear attached or cached domains */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd)
		if (domain_type_is_vm(domain) ||
		    test_bit(iommu->seq_id, domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);
	rcu_read_unlock();

	dma_free_pagelist(freelist);

	free_domain_mem(domain);
}
static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct dma_pte *pgd;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 1);
	spin_unlock_irqrestore(&iommu->lock, flags);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	context_clear_entry(context);

	id = domain->id;
	pgd = domain->pgd;

	if (domain_type_is_vm_or_si(domain)) {
		if (domain_type_is_vm(domain)) {
			id = iommu_attach_vm_domain(domain, iommu);
			if (id < 0) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				pr_err("%s: No free domain ids\n", iommu->name);
				return -EFAULT;
			}
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If the
	 * hardware _does_ cache non-present entries, then it does so in the
	 * special domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	domain_attach_iommu(domain, iommu);

	return 0;
}
{
1972 struct dmar_domain
*domain
;
1973 struct intel_iommu
*iommu
;
1977 static int domain_context_mapping_cb(struct pci_dev
*pdev
,
1978 u16 alias
, void *opaque
)
1980 struct domain_context_mapping_data
*data
= opaque
;
1982 return domain_context_mapping_one(data
->domain
, data
->iommu
,
1983 PCI_BUS_NUM(alias
), alias
& 0xff,
1988 domain_context_mapping(struct dmar_domain
*domain
, struct device
*dev
,
1991 struct intel_iommu
*iommu
;
1993 struct domain_context_mapping_data data
;
1995 iommu
= device_to_iommu(dev
, &bus
, &devfn
);
1999 if (!dev_is_pci(dev
))
2000 return domain_context_mapping_one(domain
, iommu
, bus
, devfn
,
2003 data
.domain
= domain
;
2005 data
.translation
= translation
;
2007 return pci_for_each_dma_alias(to_pci_dev(dev
),
2008 &domain_context_mapping_cb
, &data
);
2011 static int domain_context_mapped_cb(struct pci_dev
*pdev
,
2012 u16 alias
, void *opaque
)
2014 struct intel_iommu
*iommu
= opaque
;
2016 return !device_context_mapped(iommu
, PCI_BUS_NUM(alias
), alias
& 0xff);
2019 static int domain_context_mapped(struct device
*dev
)
2021 struct intel_iommu
*iommu
;
2024 iommu
= device_to_iommu(dev
, &bus
, &devfn
);
2028 if (!dev_is_pci(dev
))
2029 return device_context_mapped(iommu
, bus
, devfn
);
2031 return !pci_for_each_dma_alias(to_pci_dev(dev
),
2032 domain_context_mapped_cb
, iommu
);
/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
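
/*
 * Worked example (illustrative): aligned_nrpages(0x200, 0x1000) keeps only
 * the in-page offset 0x200, aligns 0x1200 up to 0x2000, and returns 2:
 * a buffer that straddles a page boundary needs two VT-d pages.
 */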
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}
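
/*
 * Worked example (illustrative): with iommu_superpage = 1 (2MiB support),
 * iov_pfn and phy_pfn both 512-aligned and pages = 512, one loop iteration
 * runs and the function returns level 2 (one 2MiB superpage); with
 * pages = 511 the "pages >>= 9" step hits zero and level stays 1.
 */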
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	unsigned long sg_res = 0;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (!sg) {
		sg_res = nr_pages;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is large page*/
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				lvl_pages = lvl_to_nr_pages(largepage_lvl);
				/*
				 * Ensure that old small page tables are
				 * removed to make room for superpage,
				 * if they exist.
				 */
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_pages - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
				iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->archdata.iommu = NULL;
}
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu_detach_dev(info->iommu, info->bus, info->devfn);

		if (domain_type_is_vm(domain)) {
			iommu_detach_dependent_devices(info->iommu, info->dev);
			domain_detach_iommu(domain, info->iommu);
		}

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct device->archdata.iommu stores the info
 */
static struct dmar_domain *find_domain(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
	struct device_domain_info *info;

	list_for_each_entry(info, &device_domain_list, global)
		if (info->iommu->segment == segment && info->bus == bus &&
		    info->devfn == devfn)
			return info;

	return NULL;
}
static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
						int bus, int devfn,
						struct device *dev,
						struct dmar_domain *domain)
{
	struct dmar_domain *found = NULL;
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return NULL;

	info->bus = bus;
	info->devfn = devfn;
	info->dev = dev;
	info->domain = domain;
	info->iommu = iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	if (dev)
		found = find_domain(dev);
	else {
		struct device_domain_info *info2;
		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
		if (info2)
			found = info2->domain;
	}
	if (found) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		/* Caller must free the original domain */
		return found;
	}

	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	if (dev)
		dev->archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return domain;
}

static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
	*(u16 *)opaque = alias;
	return 0;
}
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
	struct dmar_domain *domain, *tmp;
	struct intel_iommu *iommu;
	struct device_domain_info *info;
	u16 dma_alias;
	unsigned long flags;
	u8 bus, devfn;

	domain = find_domain(dev);
	if (domain)
		return domain;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return NULL;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

		spin_lock_irqsave(&device_domain_lock, flags);
		info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
						      PCI_BUS_NUM(dma_alias),
						      dma_alias & 0xff);
		if (info) {
			iommu = info->iommu;
			domain = info->domain;
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);

		/* DMA alias already has a domain, uses it */
		if (info)
			goto found_domain;
	}

	/* Allocate and initialize new domain for the device */
	domain = alloc_domain(0);
	if (!domain)
		return NULL;
	domain->id = iommu_attach_domain(domain, iommu);
	if (domain->id < 0) {
		free_domain_mem(domain);
		return NULL;
	}
	domain_attach_iommu(domain, iommu);
	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		return NULL;
	}

	/* register PCI DMA alias device */
	if (dev_is_pci(dev)) {
		tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
					   dma_alias & 0xff, NULL, domain);

		if (!tmp || tmp != domain) {
			domain_exit(domain);
			domain = tmp;
		}

		if (!domain)
			return NULL;
	}

found_domain:
	tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);

	if (!tmp || tmp != domain) {
		domain_exit(domain);
		domain = tmp;
	}

	return domain;
}
static int iommu_identity_mapping;

#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4
static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		pr_err("Reserving iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}
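/*
 * Worked example of the pfn arithmetic above (illustrative only): an RMRR
 * spanning start=0xe8000, end=0xe8fff with 4KiB VTD pages gives
 * first_vpfn = last_vpfn = 0xe8, so exactly last_vpfn - first_vpfn + 1 == 1
 * page is reserved in the iova allocator and then identity-mapped
 * read/write.
 */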
static int iommu_prepare_identity_map(struct device *dev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, and so didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
			dev_name(dev), start, end);
		return 0;
	}

	pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
		dev_name(dev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

error:
	domain_exit(domain);
	return ret;
}
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct device *dev)
{
	if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(dev, rmrr->base_address,
					  rmrr->end_address);
}
#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	pr_info("Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);

	if (ret)
		pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");

	pci_dev_put(pdev);
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool first = true;
	int nid, ret;

	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
	if (!si_domain)
		return -EFAULT;

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0) {
			domain_exit(si_domain);
			return -EFAULT;
		} else if (first) {
			si_domain->id = ret;
			first = false;
		} else if (si_domain->id != ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
		domain_attach_iommu(si_domain, iommu);
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	pr_debug("Identity mapping domain is domain %d\n",
		 si_domain->id);

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int identity_mapping(struct device *dev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = dev->archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}
static int domain_add_dev_info(struct dmar_domain *domain,
			       struct device *dev, int translation)
{
	struct dmar_domain *ndomain;
	struct intel_iommu *iommu;
	u8 bus, devfn;
	int ret;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
	if (ndomain != domain)
		return -EBUSY;

	ret = domain_context_mapping(domain, dev, translation);
	if (ret) {
		domain_remove_one_dev_info(domain, dev);
		return ret;
	}

	return 0;
}
static bool device_has_rmrr(struct device *dev)
{
	struct dmar_rmrr_unit *rmrr;
	struct device *tmp;
	int i;

	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		/*
		 * Return TRUE if this RMRR contains the device that
		 * is passed in.
		 */
		for_each_active_dev_scope(rmrr->devices,
					  rmrr->devices_cnt, i, tmp)
			if (tmp == dev) {
				rcu_read_unlock();
				return true;
			}
	}
	rcu_read_unlock();
	return false;
}
/*
 * There are a couple of cases where we need to restrict the functionality of
 * devices associated with RMRRs.  The first is when evaluating a device for
 * identity mapping because problems exist when devices are moved in and out
 * of domains and their respective RMRR information is lost.  This means that
 * a device with associated RMRRs will never be in a "passthrough" domain.
 * The second is use of the device through the IOMMU API.  This interface
 * expects to have full control of the IOVA space for the device.  We cannot
 * satisfy both the requirement that RMRR access is maintained and have an
 * unencumbered IOVA space.  We also have no ability to quiesce the device's
 * use of the RMRR space or even inform the IOMMU API user of the restriction.
 * We therefore prevent devices associated with an RMRR from participating in
 * the IOMMU API, which eliminates them from device assignment.
 *
 * In both cases we assume that PCI USB devices with RMRRs have them largely
 * for historical reasons and that the RMRR space is not actively used post
 * boot.  This exclusion may change if vendors begin to abuse it.
 *
 * The same exception is made for graphics devices, with the requirement that
 * any use of the RMRR regions will be torn down before assigning the device
 * to a guest.
 */
static bool device_is_rmrr_locked(struct device *dev)
{
	if (!device_has_rmrr(dev))
		return false;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
			return false;
	}

	return true;
}
static int iommu_should_identity_map(struct device *dev, int startup)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (device_is_rmrr_locked(dev))
			return 0;

		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
			return 1;

		if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
			return 1;

		if (!(iommu_identity_mapping & IDENTMAP_ALL))
			return 0;

		/*
		 * We want to start off with all devices in the 1:1 domain, and
		 * take them out later if we find they can't access all of memory.
		 *
		 * However, we can't do this for PCI devices behind bridges,
		 * because all PCI devices behind the same bridge will end up
		 * with the same source-id on their transactions.
		 *
		 * Practically speaking, we can't change things around for these
		 * devices at run-time, because we can't be sure there'll be no
		 * DMA transactions in flight for any of their siblings.
		 *
		 * So PCI devices (unless they're on the root bus) as well as
		 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
		 * the 1:1 domain, just in _case_ one of their siblings turns out
		 * not to be able to map all of memory.
		 */
		if (!pci_is_pcie(pdev)) {
			if (!pci_is_root_bus(pdev->bus))
				return 0;
			if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
				return 0;
		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
	} else {
		if (device_has_rmrr(dev))
			return 0;
	}

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = *dev->dma_mask;

		if (dev->coherent_dma_mask &&
		    dev->coherent_dma_mask < dma_mask)
			dma_mask = dev->coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(dev);
	}

	return 1;
}
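/*
 * Illustrative example of the mask check above: on a machine with 8GiB of
 * RAM, dma_get_required_mask() is roughly DMA_BIT_MASK(33).  A device with
 * a 32-bit dma_mask then fails the "dma_mask >= required" test and is kept
 * out of the 1:1 domain, since high memory in an identity map would be
 * unreachable through its 32-bit addressing.
 */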
static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
{
	int ret;

	if (!iommu_should_identity_map(dev, 1))
		return 0;

	ret = domain_add_dev_info(si_domain, dev,
				  hw ? CONTEXT_TT_PASS_THROUGH :
				       CONTEXT_TT_MULTI_LEVEL);
	if (!ret)
		pr_info("%s identity mapping for device %s\n",
			hw ? "Hardware" : "Software", dev_name(dev));
	else if (ret == -ENODEV)
		/* device not associated with an iommu */
		ret = 0;

	return ret;
}
static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct device *dev;
	int i;
	int ret = 0;

	for_each_pci_dev(pdev) {
		ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
		if (ret)
			return ret;
	}

	for_each_active_iommu(iommu, drhd)
		for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
			struct acpi_device_physical_node *pn;
			struct acpi_device *adev;

			if (dev->bus != &acpi_bus_type)
				continue;

			adev = to_acpi_device(dev);
			mutex_lock(&adev->physical_node_lock);
			list_for_each_entry(pn, &adev->physical_node_list, node) {
				ret = dev_prepare_static_identity_mapping(pn->dev, hw);
				if (ret)
					break;
			}
			mutex_unlock(&adev->physical_node_lock);
			if (ret)
				return ret;
		}

	return 0;
}
static void intel_iommu_init_qi(struct intel_iommu *iommu)
{
	/*
	 * Start from the sane iommu hardware state.
	 * If the queued invalidation is already initialized by us
	 * (for example, while enabling interrupt-remapping) then
	 * we got the things already rolling from a sane state.
	 */
	if (!iommu->qi) {
		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	if (dmar_enable_qi(iommu)) {
		/*
		 * Queued Invalidate not enabled, use Register Based Invalidate
		 */
		iommu->flush.flush_context = __iommu_flush_context;
		iommu->flush.flush_iotlb = __iommu_flush_iotlb;
		pr_info("%s: Using Register based invalidation\n",
			iommu->name);
	} else {
		iommu->flush.flush_context = qi_flush_context;
		iommu->flush.flush_iotlb = qi_flush_iotlb;
		pr_info("%s: Using Queued invalidation\n", iommu->name);
	}
}
static int copy_context_table(struct intel_iommu *iommu,
			      struct root_entry *old_re,
			      struct context_entry **tbl,
			      int bus, bool ext)
{
	struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
	int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
	phys_addr_t old_ce_phys;

	tbl_idx = ext ? bus * 2 : bus;

	for (devfn = 0; devfn < 256; devfn++) {
		/* First calculate the correct index */
		idx = (ext ? devfn * 2 : devfn) % 256;

		if (idx == 0) {
			/* First save what we may have and clean up */
			if (new_ce) {
				tbl[tbl_idx] = new_ce;
				__iommu_flush_cache(iommu, new_ce,
						    VTD_PAGE_SIZE);
				pos = 1;
			}

			if (old_ce)
				iounmap(old_ce);

			ret = 0;
			if (devfn < 0x80)
				old_ce_phys = root_entry_lctp(old_re);
			else
				old_ce_phys = root_entry_uctp(old_re);

			if (!old_ce_phys) {
				if (ext && devfn == 0) {
					/* No LCTP, try UCTP */
					devfn = 0x7f;
					continue;
				} else {
					goto out;
				}
			}

			ret = -ENOMEM;
			old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
			if (!old_ce)
				goto out;

			new_ce = alloc_pgtable_page(iommu->node);
			if (!new_ce)
				goto out_unmap;

			ret = 0;
		}

		/* Now copy the context entry */
		ce = old_ce[idx];

		if (!__context_present(&ce))
			continue;

		did = context_domain_id(&ce);
		if (did >= 0 && did < cap_ndoms(iommu->cap))
			set_bit(did, iommu->domain_ids);

		/*
		 * We need a marker for copied context entries. This
		 * marker needs to work for the old format as well as
		 * for extended context entries.
		 *
		 * Bit 67 of the context entry is used. In the old
		 * format this bit is available to software, in the
		 * extended format it is the PGE bit, but PGE is ignored
		 * by HW if PASIDs are disabled (and thus still
		 * available).
		 *
		 * So disable PASIDs first and then mark the entry
		 * copied. This means that we don't copy PASID
		 * translations from the old kernel, but this is fine as
		 * faults there are not fatal.
		 */
		context_clear_pasid_enable(&ce);
		context_set_copied(&ce);

		new_ce[idx] = ce;
	}

	tbl[tbl_idx + pos] = new_ce;

	__iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);

out_unmap:
	if (old_ce)
		iounmap(old_ce);
out:
	return ret;
}
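/*
 * Worked example of the index math above (illustrative only): with extended
 * context entries (ext == true), bus 1 owns tbl slots 2 and 3
 * (tbl_idx = bus * 2).  devfn 0x80 maps via (devfn * 2) % 256 back to idx 0,
 * starting the upper (UCTP) half; "pos" is what steers that second table
 * into tbl[tbl_idx + 1] rather than overwriting tbl[tbl_idx].
 */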
static int copy_translation_tables(struct intel_iommu *iommu)
{
	struct context_entry **ctxt_tbls;
	struct root_entry *old_rt;
	phys_addr_t old_rt_phys;
	int ctxt_table_entries;
	unsigned long flags;
	u64 rtaddr_reg;
	int bus, ret;
	bool new_ext, ext;

	rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
	ext        = !!(rtaddr_reg & DMA_RTADDR_RTT);
	new_ext    = !!ecap_ecs(iommu->ecap);

	/*
	 * The RTT bit can only be changed when translation is disabled,
	 * but disabling translation means to open a window for data
	 * corruption. So bail out and don't copy anything if we would
	 * have to change the bit.
	 */
	if (new_ext != ext)
		return -EINVAL;

	old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
	if (!old_rt_phys)
		return -EINVAL;

	old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
	if (!old_rt)
		return -ENOMEM;

	/* This is too big for the stack - allocate it from slab */
	ctxt_table_entries = ext ? 512 : 256;
	ret = -ENOMEM;
	ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
	if (!ctxt_tbls)
		goto out_unmap;

	for (bus = 0; bus < 256; bus++) {
		ret = copy_context_table(iommu, &old_rt[bus],
					 ctxt_tbls, bus, ext);
		if (ret) {
			pr_err("%s: Failed to copy context table for bus %d\n",
			       iommu->name, bus);
			continue;
		}
	}

	spin_lock_irqsave(&iommu->lock, flags);

	/* Context tables are copied, now write them to the root_entry table */
	for (bus = 0; bus < 256; bus++) {
		int idx = ext ? bus * 2 : bus;
		u64 val;

		if (ctxt_tbls[idx]) {
			val = virt_to_phys(ctxt_tbls[idx]) | 1;
			iommu->root_entry[bus].lo = val;
		}

		if (!ext || !ctxt_tbls[idx + 1])
			continue;

		val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
		iommu->root_entry[bus].hi = val;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	kfree(ctxt_tbls);

	__iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);

	ret = 0;

out_unmap:
	iounmap(old_rt);

	return ret;
}
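/*
 * Note on the "| 1" above: bit 0 of a root-entry half is its present bit,
 * so each copied context table is installed and marked present in a single
 * store, before the cache flush at the end makes the whole root table
 * visible to the hardware.
 */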
static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	bool copied_tables = false;
	struct device *dev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path; all other accesses are
		 * read-only
		 */
		if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
			g_num_of_iommus++;
			continue;
		}
		pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
	}

	/* Preallocate enough resources for IOMMU hot-addition */
	if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
		g_num_of_iommus = DMAR_UNITS_SUPPORTED;

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		pr_err("Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto free_g_iommus;
	}

	for_each_active_iommu(iommu, drhd) {
		g_iommus[iommu->seq_id] = iommu;

		intel_iommu_init_qi(iommu);

		ret = iommu_init_domains(iommu);
		if (ret)
			goto free_iommu;

		init_translation_status(iommu);

		if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
			iommu_disable_translation(iommu);
			clear_translation_pre_enabled(iommu);
			pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
				iommu->name);
		}

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret)
			goto free_iommu;

		if (translation_pre_enabled(iommu)) {
			pr_info("Translation already enabled - trying to copy translation structures\n");

			ret = copy_translation_tables(iommu);
			if (ret) {
				/*
				 * We found the IOMMU with translation
				 * enabled - but failed to copy over the
				 * old root-entry table. Try to proceed
				 * by disabling translation now and
				 * allocating a clean root-entry table.
				 * This might cause DMAR faults, but
				 * probably the dump will still succeed.
				 */
				pr_err("Failed to copy translation tables from previous kernel for %s\n",
				       iommu->name);
				iommu_disable_translation(iommu);
				clear_translation_pre_enabled(iommu);
			} else {
				pr_info("Copied translation tables from previous kernel for %s\n",
					iommu->name);
				copied_tables = true;
			}
		}

		iommu_flush_write_buffer(iommu);
		iommu_set_root_entry(iommu);
		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	if (iommu_identity_mapping) {
		ret = si_domain_init(hw_pass_through);
		if (ret)
			goto free_iommu;
	}

	check_tylersburg_isoch();

	/*
	 * If we copied translations from a previous kernel in the kdump
	 * case, we can not assign the devices to domains now, as that
	 * would eliminate the old mappings. So skip this part and defer
	 * the assignment to device driver initialization time.
	 */
	if (copied_tables)
		goto domains_done;

	/*
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			pr_crit("Failed to setup IOMMU pass-through\n");
			goto free_iommu;
		}
	}
	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
	 */
	pr_info("Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		/* some BIOSes list non-existent devices in the DMAR table */
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, dev) {
			ret = iommu_prepare_rmrr_dev(rmrr, dev);
			if (ret)
				pr_err("Mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

domains_done:

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto free_iommu;

		if (!translation_pre_enabled(iommu))
			iommu_enable_translation(iommu);

		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;

free_iommu:
	for_each_active_iommu(iommu, drhd) {
		disable_dmar_iommu(iommu);
		free_dmar_iommu(iommu);
	}
	kfree(deferred_flush);
free_g_iommus:
	kfree(g_iommus);
error:
	return ret;
}
/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		pr_err("Allocating %ld-page iova for %s failed",
		       nrpages, dev_name(dev));
		return NULL;
	}

	return iova;
}
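/*
 * Illustrative example: a PCIe device advertising a 64-bit dma_mask still
 * gets its first IOVA from below 4GiB here; only once that space is
 * exhausted does allocation spill above DMA_BIT_MASK(32).  This keeps the
 * scarce low IOVA range usable for 32-bit devices sharing the domain.
 */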
static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		pr_err("Allocating domain for %s failed\n",
		       dev_name(dev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(dev))) {
		ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			pr_err("Domain context map for %s failed\n",
			       dev_name(dev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}
/* Check if the dev needs to go through the non-identity map and unmap process. */
static int iommu_no_mapping(struct device *dev)
{
	int found;

	if (iommu_dummy(dev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(dev);
	if (found) {
		if (iommu_should_identity_map(dev, 0))
			return 1;
		/*
		 * 32 bit DMA is removed from si_domain and falls back
		 * to non-identity mapping.
		 */
		domain_remove_one_dev_info(si_domain, dev);
		pr_info("32bit %s uses non-identity mapping\n",
			dev_name(dev));
		return 0;
	} else {
		/*
		 * In case of a detached 64 bit DMA device from vm, the device
		 * is put into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(dev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, dev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				pr_info("64bit %s uses identity mapping\n",
					dev_name(dev));
				return 1;
			}
		}
	}

	return 0;
}
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova = NULL;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(dev))
		return paddr;

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be a partial page, we should map the
	 * whole page.  Note: if two parts of one page are separately mapped,
	 * we might have two guest_addrs mapping to the same host paddr, but
	 * this is not a big problem
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
	       dev_name(dev), size, (unsigned long long)paddr, dir);
	return 0;
}
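/*
 * Example of the offset handling above (illustrative only): mapping paddr
 * 0x12345678 with size 0x100 allocates one 4KiB IOVA page, and the returned
 * handle is (iova->pfn_lo << PAGE_SHIFT) + 0x678, i.e. the sub-page offset
 * of the original buffer is preserved in the DMA address.
 */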
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, *dev->dma_mask);
}
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
						 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
					iova->pfn_lo, iova_size(iova),
					!deferred_flush[i].freelist[j], 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
			if (deferred_flush[i].freelist[j])
				dma_free_pagelist(deferred_flush[i].freelist[j]);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}
static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].freelist[next] = freelist;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
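/*
 * Design note: unmaps are batched per-IOMMU in deferred_flush[] and drained
 * either when HIGH_WATER_MARK entries pile up or when the 10ms unmap_timer
 * fires.  This trades a short window in which stale IOTLB entries remain
 * valid for far fewer invalidation operations (see flush_unmaps() above).
 */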
static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
{
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;
	struct page *freelist;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(dev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 dev_name(dev), start_pfn, last_pfn);

	freelist = domain_unmap(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, !freelist, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		dma_free_pagelist(freelist);
	} else {
		add_unmap(domain, iova, freelist);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	intel_unmap(dev, dev_addr);
}
static void *intel_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	struct page *page = NULL;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	if (!iommu_no_mapping(dev))
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	if (flags & __GFP_WAIT) {
		unsigned int count = size >> PAGE_SHIFT;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (page && iommu_no_mapping(dev) &&
		    page_to_phys(page) + size > dev->coherent_dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}

	if (!page)
		page = alloc_pages(flags, order);
	if (!page)
		return NULL;
	memset(page_address(page), 0, size);

	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
					 DMA_BIDIRECTIONAL,
					 dev->coherent_dma_mask);
	if (*dma_handle)
		return page_address(page);
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);

	return NULL;
}
static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	int order;
	struct page *page = virt_to_page(vaddr);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap(dev, dma_handle);
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);
}
static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	intel_unmap(dev, sglist[0].dma_address);
}
static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(dev))
		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
				*dev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
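/*
 * Illustrative only: once dma_ops points at intel_dma_ops (see
 * intel_iommu_init() below), an ordinary driver call such as
 *
 *	dma_addr_t dma = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
 *				      DMA_TO_DEVICE);
 *
 * resolves to intel_map_page() above, and the matching dma_unmap_page()
 * to intel_unmap_page().
 */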
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0,
					       SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!iommu_domain_cache) {
		pr_err("Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_err("Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	iommu_iova_cache_destroy();

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	iommu_iova_cache_destroy();
}
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;
	struct device *dev;
	int i;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				break;
			/* ignore DMAR unit if no devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_active_drhd_unit(drhd) {
		if (drhd->include_all)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, dev)
			if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
				break;
		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_enable_translation(iommu);
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}
static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}
static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}
static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}

static struct syscore_ops iommu_syscore_ops = {
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif	/* CONFIG_SUSPEND */
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;
	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				&rmrru->devices_cnt);
	if (rmrru->devices_cnt && rmrru->devices == NULL) {
		kfree(rmrru);
		return -ENOMEM;
	}

	list_add(&rmrru->list, &dmar_rmrr_units);

	return 0;
}
static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
{
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *tmp;

	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		tmp = (struct acpi_dmar_atsr *)atsru->hdr;
		if (atsr->segment != tmp->segment)
			continue;
		if (atsr->header.length != tmp->header.length)
			continue;
		if (memcmp(atsr, tmp, atsr->header.length) == 0)
			return atsru;
	}

	return NULL;
}
int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
		return 0;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru)
		return 0;

	atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	/*
	 * If memory is allocated from slab by ACPI _DSM method, we need to
	 * copy the memory content because the memory buffer will be freed
	 * on return.
	 */
	atsru->hdr = (void *)(atsru + 1);
	memcpy(atsru->hdr, hdr, hdr->length);
	atsru->include_all = atsr->flags & 0x1;
	if (!atsru->include_all) {
		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt);
		if (atsru->devices_cnt && atsru->devices == NULL) {
			kfree(atsru);
			return -ENOMEM;
		}
	}

	list_add_rcu(&atsru->list, &dmar_atsr_units);

	return 0;
}

static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
	kfree(atsru);
}
int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru) {
		list_del_rcu(&atsru->list);
		synchronize_rcu();
		intel_iommu_free_atsr(atsru);
	}

	return 0;
}

int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	int i;
	struct device *dev;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (!atsru)
		return 0;

	if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
		for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
					  i, dev)
			return -EBUSY;

	return 0;
}
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
	int sp, ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (g_iommus[iommu->seq_id])
		return 0;

	if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
		pr_warn("%s: Doesn't support hardware pass through.\n",
			iommu->name);
		return -ENXIO;
	}
	if (!ecap_sc_support(iommu->ecap) &&
	    domain_update_iommu_snooping(iommu)) {
		pr_warn("%s: Doesn't support snooping.\n",
			iommu->name);
		return -ENXIO;
	}
	sp = domain_update_iommu_superpage(iommu) - 1;
	if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
		pr_warn("%s: Doesn't support large page.\n",
			iommu->name);
		return -ENXIO;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	g_iommus[iommu->seq_id] = iommu;
	ret = iommu_init_domains(iommu);
	if (ret == 0)
		ret = iommu_alloc_root_entry(iommu);
	if (ret)
		goto out;

	if (dmaru->ignored) {
		/*
		 * we always have to disable PMRs or DMA may fail on this device
		 */
		if (force_on)
			iommu_disable_protect_mem_regions(iommu);
		return 0;
	}

	intel_iommu_init_qi(iommu);
	iommu_flush_write_buffer(iommu);
	ret = dmar_set_interrupt(iommu);
	if (ret)
		goto disable_iommu;

	iommu_set_root_entry(iommu);
	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
	iommu_enable_translation(iommu);

	if (si_domain) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0 || si_domain->id != ret)
			goto disable_iommu;
		domain_attach_iommu(si_domain, iommu);
	}

	iommu_disable_protect_mem_regions(iommu);
	return 0;

disable_iommu:
	disable_dmar_iommu(iommu);
out:
	free_dmar_iommu(iommu);
	return ret;
}

int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!intel_iommu_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;

	if (insert) {
		ret = intel_iommu_add(dmaru);
	} else {
		disable_dmar_iommu(iommu);
		free_dmar_iommu(iommu);
	}

	return ret;
}
static void intel_iommu_free_dmars(void)
{
	struct dmar_rmrr_unit *rmrru, *rmrr_n;
	struct dmar_atsr_unit *atsru, *atsr_n;

	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
		list_del(&rmrru->list);
		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
		kfree(rmrru);
	}

	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
		list_del(&atsru->list);
		intel_iommu_free_atsr(atsru);
	}
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i, ret = 1;
	struct pci_bus *bus;
	struct pci_dev *bridge = NULL;
	struct device *tmp;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);
	for (bus = dev->bus; bus; bus = bus->parent) {
		bridge = bus->self;
		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
			break;
	}
	if (!bridge)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment != pci_domain_nr(dev->bus))
			continue;

		for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
			if (tmp == &bridge->dev)
				goto out;

		if (atsru->include_all)
			goto out;
	}
	ret = 0;
out:
	rcu_read_unlock();

	return ret;
}
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_rmrr_unit *rmrru;
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_reserved_memory *rmrr;

	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
		return 0;

	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
		rmrr = container_of(rmrru->hdr,
				    struct acpi_dmar_reserved_memory, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				rmrr->segment, rmrru->devices,
				rmrru->devices_cnt);
			if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			dmar_remove_dev_scope(info, rmrr->segment,
				rmrru->devices, rmrru->devices_cnt);
		}
	}

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		if (atsru->include_all)
			continue;

		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
					(void *)atsr + atsr->header.length,
					atsr->segment, atsru->devices,
					atsru->devices_cnt);
			if (ret > 0)
				break;
			else if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			if (dmar_remove_dev_scope(info, atsr->segment,
					atsru->devices, atsru->devices_cnt))
				break;
		}
	}

	return 0;
}
/*
 * Here we only respond to the action of unbinding a device from its driver.
 *
 * Added devices are not attached to their DMAR domain here yet. That happens
 * when the device is first mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct dmar_domain *domain;

	if (iommu_dummy(dev))
		return 0;

	if (action != BUS_NOTIFY_REMOVED_DEVICE)
		return 0;

	domain = find_domain(dev);
	if (!domain)
		return 0;

	down_read(&dmar_global_lock);
	domain_remove_one_dev_info(domain, dev);
	if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
		domain_exit(domain);
	up_read(&dmar_global_lock);

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};
static int intel_iommu_memory_notifier(struct notifier_block *nb,
				       unsigned long val, void *v)
{
	struct memory_notify *mhp = v;
	unsigned long long start, end;
	unsigned long start_vpfn, last_vpfn;

	switch (val) {
	case MEM_GOING_ONLINE:
		start = mhp->start_pfn << PAGE_SHIFT;
		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
		if (iommu_domain_identity_map(si_domain, start, end)) {
			pr_warn("Failed to build identity map for [%llx-%llx]\n",
				start, end);
			return NOTIFY_BAD;
		}
		break;

	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
		while (start_vpfn <= last_vpfn) {
			struct iova *iova;
			struct dmar_drhd_unit *drhd;
			struct intel_iommu *iommu;
			struct page *freelist;

			iova = find_iova(&si_domain->iovad, start_vpfn);
			if (iova == NULL) {
				pr_debug("Failed get IOVA for PFN %lx\n",
					 start_vpfn);
				break;
			}

			iova = split_and_remove_iova(&si_domain->iovad, iova,
						     start_vpfn, last_vpfn);
			if (iova == NULL) {
				pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
					start_vpfn, last_vpfn);
				return NOTIFY_BAD;
			}

			freelist = domain_unmap(si_domain, iova->pfn_lo,
						iova->pfn_hi);

			rcu_read_lock();
			for_each_active_iommu(iommu, drhd)
				iommu_flush_iotlb_psi(iommu, si_domain->id,
					iova->pfn_lo, iova_size(iova),
					!freelist, 0);
			rcu_read_unlock();
			dma_free_pagelist(freelist);

			start_vpfn = iova->pfn_hi + 1;
			free_iova_mem(iova);
		}
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block intel_iommu_memory_nb = {
	.notifier_call = intel_iommu_memory_notifier,
	.priority = 0
};
static ssize_t intel_iommu_show_version(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	u32 ver = readl(iommu->reg + DMAR_VER_REG);
	return sprintf(buf, "%d:%d\n",
		       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);

static ssize_t intel_iommu_show_address(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->reg_phys);
}
static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);

static ssize_t intel_iommu_show_cap(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);

static ssize_t intel_iommu_show_ecap(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->ecap);
}
static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);

static struct attribute *intel_iommu_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_address.attr,
	&dev_attr_cap.attr,
	&dev_attr_ecap.attr,
	NULL,
};

static struct attribute_group intel_iommu_group = {
	.name = "intel-iommu",
	.attrs = intel_iommu_attrs,
};

const struct attribute_group *intel_iommu_groups[] = {
	&intel_iommu_group,
	NULL,
};
int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENOMEM;
	}

	down_write(&dmar_global_lock);
	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	if (no_iommu || dmar_disabled)
		goto out_free_dmar;

	if (list_empty(&dmar_rmrr_units))
		pr_info("No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		pr_info("No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_reserved_range;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		pr_err("Initialization failed\n");
		goto out_free_reserved_range;
	}
	up_write(&dmar_global_lock);
	pr_info("Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	for_each_active_iommu(iommu, drhd)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       "%s", iommu->name);

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
	bus_register_notifier(&pci_bus_type, &device_nb);
	if (si_domain && !hw_pass_through)
		register_memory_notifier(&intel_iommu_memory_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_dmar:
	intel_iommu_free_dmars();
	up_write(&dmar_global_lock);
	iommu_exit_mempool();
	return ret;
}
static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
	return 0;
}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices.  If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev)
{
	if (!iommu || !dev || !dev_is_pci(dev))
		return;

	pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev)
{
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
	bool found = false;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, dev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there are no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp,
		 * update iommu count and coherency
		 */
		if (info->iommu == iommu)
			found = true;
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found) {
		domain_detach_iommu(domain, iommu);
		if (!domain_type_is_vm_or_si(domain))
			iommu_detach_domain(domain, iommu);
	}
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
			 DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}
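/*
 * Example of the width arithmetic above (illustrative only): guest_width 48
 * rounds to an adjusted width of 48, and width_to_agaw(48) yields agaw 2,
 * i.e. a 4-level page table whose top pgd page is allocated above.
 */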
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
	struct dmar_domain *dmar_domain;
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
	if (!dmar_domain) {
		pr_err("Can't allocate dmar_domain\n");
		return NULL;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		pr_err("Domain initialization failed\n");
		domain_exit(dmar_domain);
		return NULL;
	}
	domain_update_iommu_cap(dmar_domain);

	domain = &dmar_domain->domain;
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return domain;
}

static void intel_iommu_domain_free(struct iommu_domain *domain)
{
	domain_exit(to_dmar_domain(domain));
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
		return -EPERM;
	}

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			if (domain_type_is_vm_or_si(dmar_domain))
				domain_remove_one_dev_info(old_domain, dev);
			else
				domain_remove_dev_info(old_domain);

			if (!domain_type_is_vm_or_si(old_domain) &&
			    list_empty(&old_domain->devices))
				domain_exit(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		pr_err("%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	domain_remove_one_dev_info(to_dmar_domain(domain), dev);
}
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			pr_err("%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}
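/*
 * Example of the rounding above (illustrative only): hpa 0x1f00 with size
 * 0x200 crosses a page boundary, so aligned_nrpages() yields 2 and both
 * pages are mapped even though only 0x200 bytes were requested.
 */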
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, num, ndomains, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
		BUG();

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
		iommu = g_iommus[iommu_id];

		/*
		 * find bit position of dmar_domain
		 */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == dmar_domain)
				iommu_flush_iotlb_psi(iommu, num, start_pfn,
						      npages, !freelist, 0);
		}
	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}
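/*
 * Translate an IOVA back to a physical address by walking the domain's
 * page table; returns 0 if nothing is mapped at that address.
 */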
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}
static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}
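/*
 * Device hotplug: link the device to its IOMMU in sysfs and place it in
 * an iommu_group; removal undoes both.
 */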
static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(iommu->iommu_dev, dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}
static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(iommu->iommu_dev, dev);
}
static const struct iommu_ops intel_iommu_ops = {
	.capable	= intel_iommu_capable,
	.domain_alloc	= intel_iommu_domain_alloc,
	.domain_free	= intel_iommu_domain_free,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};
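/*
 * These ops are handed to the IOMMU core for the PCI bus via
 * bus_set_iommu(&pci_bus_type, &intel_iommu_ops) from
 * intel_iommu_init() earlier in this file.
 */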
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	pr_info("Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
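/*
 * The same set of host bridge IDs also needs the write-buffer flush
 * quirk below, hence the identical fixup list.
 */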
static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	pr_info("Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)
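/*
 * On Calpella/Ironlake, graphics remapping only works if the BIOS set
 * GGC_MEMORY_VT_ENABLED in the host bridge's GGC register, i.e.
 * allocated stolen memory for a shadow GTT. If it didn't, all we can
 * do is keep the IOMMU away from graphics; if it did, the gfx device
 * still has to be idle when we flush, so batched (deferred) IOTLB
 * flushing is disabled in favour of strict mode.
 */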
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		pr_info("Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
		vtisochctrl);
}