/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
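/*
 * Added note: ~0xFFFUL leaves bits 0-11 clear and every higher bit set, so
 * every power-of-two size that is a multiple of 4KiB is advertised; actual
 * superpage use is decided later in __domain_mapping() based on
 * hardware_largepage_caps().
 */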
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}
static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
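/*
 * Added note, a quick sanity check of the arithmetic above: a 4-level table
 * has agaw 2, agaw_to_width(2) = 30 + 2*9 = 48 bits, and width_to_agaw(48)
 * = DIV_ROUND_UP(18, 9) = 2 again; level_to_offset_bits(2) = 9, so the
 * level-2 index is bits 9-17 of the VT-d pfn.
 */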
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
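/*
 * Added note: on x86 with 4KiB kernel pages PAGE_SHIFT == VTD_PAGE_SHIFT,
 * so the two conversions above are identities; they only shift when the MM
 * page is larger than the 4KiB VT-d page.
 */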
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;
/* Root entry: bits 12-63 hold the Context Ptr (12 - (haw-1)) */
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val &= ~VTD_PAGE_MASK;
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
/*
 * Context-entry layout (low 64 bits):
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					      unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
/*
 * DMA page-table entry layout:
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
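/*
 * Added note: a page-table page holds VTD_PAGE_SIZE / sizeof(struct dma_pte)
 * = 512 entries, so first_pte_in_page() ("is this PTE 4KiB-aligned?") tells
 * the clear/free loops below when they have walked off the current table page.
 */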
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* domain represents a virtual machine, more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)
struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses*/

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};
struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);
#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;
/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
					unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
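/*
 * Added example: with the default 48-bit guest address width, addr_width is
 * 48 - 12 = 36, so domain_pfn_supported() rejects any VT-d pfn at or above
 * 1 << 36 (i.e. any DMA address at or above 1 << 48).
 */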
555 static int __iommu_calculate_agaw(struct intel_iommu
*iommu
, int max_gaw
)
560 sagaw
= cap_sagaw(iommu
->cap
);
561 for (agaw
= width_to_agaw(max_gaw
);
563 if (test_bit(agaw
, &sagaw
))
571 * Calculate max SAGAW for each iommu.
573 int iommu_calculate_max_sagaw(struct intel_iommu
*iommu
)
575 return __iommu_calculate_agaw(iommu
, MAX_AGAW_WIDTH
);
/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and fall back
 * to a smaller supported agaw for iommus that don't support the default agaw.
 */
583 int iommu_calculate_agaw(struct intel_iommu
*iommu
)
585 return __iommu_calculate_agaw(iommu
, DEFAULT_DOMAIN_ADDRESS_WIDTH
);
/* This function only returns a single iommu in a domain */
589 static struct intel_iommu
*domain_get_iommu(struct dmar_domain
*domain
)
593 /* si_domain and vm domain should not get here. */
594 BUG_ON(domain_type_is_vm_or_si(domain
));
595 iommu_id
= find_first_bit(domain
->iommu_bmp
, g_num_of_iommus
);
596 if (iommu_id
< 0 || iommu_id
>= g_num_of_iommus
)
599 return g_iommus
[iommu_id
];
602 static void domain_update_iommu_coherency(struct dmar_domain
*domain
)
604 struct dmar_drhd_unit
*drhd
;
605 struct intel_iommu
*iommu
;
608 domain
->iommu_coherency
= 1;
610 for_each_set_bit(i
, domain
->iommu_bmp
, g_num_of_iommus
) {
612 if (!ecap_coherent(g_iommus
[i
]->ecap
)) {
613 domain
->iommu_coherency
= 0;
620 /* No hardware attached; use lowest common denominator */
622 for_each_active_iommu(iommu
, drhd
) {
623 if (!ecap_coherent(iommu
->ecap
)) {
624 domain
->iommu_coherency
= 0;
631 static int domain_update_iommu_snooping(struct intel_iommu
*skip
)
633 struct dmar_drhd_unit
*drhd
;
634 struct intel_iommu
*iommu
;
638 for_each_active_iommu(iommu
, drhd
) {
640 if (!ecap_sc_support(iommu
->ecap
)) {
651 static int domain_update_iommu_superpage(struct intel_iommu
*skip
)
653 struct dmar_drhd_unit
*drhd
;
654 struct intel_iommu
*iommu
;
657 if (!intel_iommu_superpage
) {
661 /* set iommu_superpage to the smallest common denominator */
663 for_each_active_iommu(iommu
, drhd
) {
665 mask
&= cap_super_page_val(iommu
->cap
);
675 /* Some capabilities may be different across iommus */
676 static void domain_update_iommu_cap(struct dmar_domain
*domain
)
678 domain_update_iommu_coherency(domain
);
679 domain
->iommu_snooping
= domain_update_iommu_snooping(NULL
);
680 domain
->iommu_superpage
= domain_update_iommu_superpage(NULL
);
683 static struct intel_iommu
*device_to_iommu(struct device
*dev
, u8
*bus
, u8
*devfn
)
685 struct dmar_drhd_unit
*drhd
= NULL
;
686 struct intel_iommu
*iommu
;
688 struct pci_dev
*ptmp
, *pdev
= NULL
;
692 if (dev_is_pci(dev
)) {
693 pdev
= to_pci_dev(dev
);
694 segment
= pci_domain_nr(pdev
->bus
);
695 } else if (ACPI_COMPANION(dev
))
696 dev
= &ACPI_COMPANION(dev
)->dev
;
699 for_each_active_iommu(iommu
, drhd
) {
700 if (pdev
&& segment
!= drhd
->segment
)
703 for_each_active_dev_scope(drhd
->devices
,
704 drhd
->devices_cnt
, i
, tmp
) {
706 *bus
= drhd
->devices
[i
].bus
;
707 *devfn
= drhd
->devices
[i
].devfn
;
711 if (!pdev
|| !dev_is_pci(tmp
))
714 ptmp
= to_pci_dev(tmp
);
715 if (ptmp
->subordinate
&&
716 ptmp
->subordinate
->number
<= pdev
->bus
->number
&&
717 ptmp
->subordinate
->busn_res
.end
>= pdev
->bus
->number
)
721 if (pdev
&& drhd
->include_all
) {
723 *bus
= pdev
->bus
->number
;
724 *devfn
= pdev
->devfn
;
735 static void domain_flush_cache(struct dmar_domain
*domain
,
736 void *addr
, int size
)
738 if (!domain
->iommu_coherency
)
739 clflush_cache_range(addr
, size
);
742 /* Gets context entry for a given bus and devfn */
743 static struct context_entry
* device_to_context_entry(struct intel_iommu
*iommu
,
746 struct root_entry
*root
;
747 struct context_entry
*context
;
748 unsigned long phy_addr
;
751 spin_lock_irqsave(&iommu
->lock
, flags
);
752 root
= &iommu
->root_entry
[bus
];
753 context
= get_context_addr_from_root(root
);
755 context
= (struct context_entry
*)
756 alloc_pgtable_page(iommu
->node
);
758 spin_unlock_irqrestore(&iommu
->lock
, flags
);
761 __iommu_flush_cache(iommu
, (void *)context
, CONTEXT_SIZE
);
762 phy_addr
= virt_to_phys((void *)context
);
763 set_root_value(root
, phy_addr
);
764 set_root_present(root
);
765 __iommu_flush_cache(iommu
, root
, sizeof(*root
));
767 spin_unlock_irqrestore(&iommu
->lock
, flags
);
768 return &context
[devfn
];
771 static int device_context_mapped(struct intel_iommu
*iommu
, u8 bus
, u8 devfn
)
773 struct root_entry
*root
;
774 struct context_entry
*context
;
778 spin_lock_irqsave(&iommu
->lock
, flags
);
779 root
= &iommu
->root_entry
[bus
];
780 context
= get_context_addr_from_root(root
);
785 ret
= context_present(&context
[devfn
]);
787 spin_unlock_irqrestore(&iommu
->lock
, flags
);
791 static void clear_context_table(struct intel_iommu
*iommu
, u8 bus
, u8 devfn
)
793 struct root_entry
*root
;
794 struct context_entry
*context
;
797 spin_lock_irqsave(&iommu
->lock
, flags
);
798 root
= &iommu
->root_entry
[bus
];
799 context
= get_context_addr_from_root(root
);
801 context_clear_entry(&context
[devfn
]);
802 __iommu_flush_cache(iommu
, &context
[devfn
], \
805 spin_unlock_irqrestore(&iommu
->lock
, flags
);
808 static void free_context_table(struct intel_iommu
*iommu
)
810 struct root_entry
*root
;
813 struct context_entry
*context
;
815 spin_lock_irqsave(&iommu
->lock
, flags
);
816 if (!iommu
->root_entry
) {
819 for (i
= 0; i
< ROOT_ENTRY_NR
; i
++) {
820 root
= &iommu
->root_entry
[i
];
821 context
= get_context_addr_from_root(root
);
823 free_pgtable_page(context
);
825 free_pgtable_page(iommu
->root_entry
);
826 iommu
->root_entry
= NULL
;
828 spin_unlock_irqrestore(&iommu
->lock
, flags
);
831 static struct dma_pte
*pfn_to_dma_pte(struct dmar_domain
*domain
,
832 unsigned long pfn
, int *target_level
)
834 struct dma_pte
*parent
, *pte
= NULL
;
835 int level
= agaw_to_level(domain
->agaw
);
838 BUG_ON(!domain
->pgd
);
840 if (!domain_pfn_supported(domain
, pfn
))
841 /* Address beyond IOMMU's addressing capabilities. */
844 parent
= domain
->pgd
;
849 offset
= pfn_level_offset(pfn
, level
);
850 pte
= &parent
[offset
];
851 if (!*target_level
&& (dma_pte_superpage(pte
) || !dma_pte_present(pte
)))
853 if (level
== *target_level
)
856 if (!dma_pte_present(pte
)) {
859 tmp_page
= alloc_pgtable_page(domain
->nid
);
864 domain_flush_cache(domain
, tmp_page
, VTD_PAGE_SIZE
);
865 pteval
= ((uint64_t)virt_to_dma_pfn(tmp_page
) << VTD_PAGE_SHIFT
) | DMA_PTE_READ
| DMA_PTE_WRITE
;
866 if (cmpxchg64(&pte
->val
, 0ULL, pteval
))
867 /* Someone else set it while we were thinking; use theirs. */
868 free_pgtable_page(tmp_page
);
870 domain_flush_cache(domain
, pte
, sizeof(*pte
));
875 parent
= phys_to_virt(dma_pte_addr(pte
));
880 *target_level
= level
;
886 /* return address's pte at specific level */
887 static struct dma_pte
*dma_pfn_level_pte(struct dmar_domain
*domain
,
889 int level
, int *large_page
)
891 struct dma_pte
*parent
, *pte
= NULL
;
892 int total
= agaw_to_level(domain
->agaw
);
895 parent
= domain
->pgd
;
896 while (level
<= total
) {
897 offset
= pfn_level_offset(pfn
, total
);
898 pte
= &parent
[offset
];
902 if (!dma_pte_present(pte
)) {
907 if (dma_pte_superpage(pte
)) {
912 parent
= phys_to_virt(dma_pte_addr(pte
));
918 /* clear last level pte, a tlb flush should be followed */
919 static void dma_pte_clear_range(struct dmar_domain
*domain
,
920 unsigned long start_pfn
,
921 unsigned long last_pfn
)
923 unsigned int large_page
= 1;
924 struct dma_pte
*first_pte
, *pte
;
926 BUG_ON(!domain_pfn_supported(domain
, start_pfn
));
927 BUG_ON(!domain_pfn_supported(domain
, last_pfn
));
928 BUG_ON(start_pfn
> last_pfn
);
930 /* we don't need lock here; nobody else touches the iova range */
933 first_pte
= pte
= dma_pfn_level_pte(domain
, start_pfn
, 1, &large_page
);
935 start_pfn
= align_to_level(start_pfn
+ 1, large_page
+ 1);
940 start_pfn
+= lvl_to_nr_pages(large_page
);
942 } while (start_pfn
<= last_pfn
&& !first_pte_in_page(pte
));
944 domain_flush_cache(domain
, first_pte
,
945 (void *)pte
- (void *)first_pte
);
947 } while (start_pfn
&& start_pfn
<= last_pfn
);
950 static void dma_pte_free_level(struct dmar_domain
*domain
, int level
,
951 struct dma_pte
*pte
, unsigned long pfn
,
952 unsigned long start_pfn
, unsigned long last_pfn
)
954 pfn
= max(start_pfn
, pfn
);
955 pte
= &pte
[pfn_level_offset(pfn
, level
)];
958 unsigned long level_pfn
;
959 struct dma_pte
*level_pte
;
961 if (!dma_pte_present(pte
) || dma_pte_superpage(pte
))
964 level_pfn
= pfn
& level_mask(level
- 1);
965 level_pte
= phys_to_virt(dma_pte_addr(pte
));
968 dma_pte_free_level(domain
, level
- 1, level_pte
,
969 level_pfn
, start_pfn
, last_pfn
);
971 /* If range covers entire pagetable, free it */
972 if (!(start_pfn
> level_pfn
||
973 last_pfn
< level_pfn
+ level_size(level
) - 1)) {
975 domain_flush_cache(domain
, pte
, sizeof(*pte
));
976 free_pgtable_page(level_pte
);
979 pfn
+= level_size(level
);
980 } while (!first_pte_in_page(++pte
) && pfn
<= last_pfn
);
983 /* free page table pages. last level pte should already be cleared */
984 static void dma_pte_free_pagetable(struct dmar_domain
*domain
,
985 unsigned long start_pfn
,
986 unsigned long last_pfn
)
988 BUG_ON(!domain_pfn_supported(domain
, start_pfn
));
989 BUG_ON(!domain_pfn_supported(domain
, last_pfn
));
990 BUG_ON(start_pfn
> last_pfn
);
992 dma_pte_clear_range(domain
, start_pfn
, last_pfn
);
994 /* We don't need lock here; nobody else touches the iova range */
995 dma_pte_free_level(domain
, agaw_to_level(domain
->agaw
),
996 domain
->pgd
, 0, start_pfn
, last_pfn
);
999 if (start_pfn
== 0 && last_pfn
== DOMAIN_MAX_PFN(domain
->gaw
)) {
1000 free_pgtable_page(domain
->pgd
);
1005 /* When a page at a given level is being unlinked from its parent, we don't
1006 need to *modify* it at all. All we need to do is make a list of all the
1007 pages which can be freed just as soon as we've flushed the IOTLB and we
1008 know the hardware page-walk will no longer touch them.
1009 The 'pte' argument is the *parent* PTE, pointing to the page that is to
1011 static struct page
*dma_pte_list_pagetables(struct dmar_domain
*domain
,
1012 int level
, struct dma_pte
*pte
,
1013 struct page
*freelist
)
1017 pg
= pfn_to_page(dma_pte_addr(pte
) >> PAGE_SHIFT
);
1018 pg
->freelist
= freelist
;
1024 pte
= page_address(pg
);
1026 if (dma_pte_present(pte
) && !dma_pte_superpage(pte
))
1027 freelist
= dma_pte_list_pagetables(domain
, level
- 1,
1030 } while (!first_pte_in_page(pte
));
1035 static struct page
*dma_pte_clear_level(struct dmar_domain
*domain
, int level
,
1036 struct dma_pte
*pte
, unsigned long pfn
,
1037 unsigned long start_pfn
,
1038 unsigned long last_pfn
,
1039 struct page
*freelist
)
1041 struct dma_pte
*first_pte
= NULL
, *last_pte
= NULL
;
1043 pfn
= max(start_pfn
, pfn
);
1044 pte
= &pte
[pfn_level_offset(pfn
, level
)];
1047 unsigned long level_pfn
;
1049 if (!dma_pte_present(pte
))
1052 level_pfn
= pfn
& level_mask(level
);
1054 /* If range covers entire pagetable, free it */
1055 if (start_pfn
<= level_pfn
&&
1056 last_pfn
>= level_pfn
+ level_size(level
) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
1059 if (level
> 1 && !dma_pte_superpage(pte
))
1060 freelist
= dma_pte_list_pagetables(domain
, level
- 1, pte
, freelist
);
1066 } else if (level
> 1) {
1067 /* Recurse down into a level that isn't *entirely* obsolete */
1068 freelist
= dma_pte_clear_level(domain
, level
- 1,
1069 phys_to_virt(dma_pte_addr(pte
)),
1070 level_pfn
, start_pfn
, last_pfn
,
1074 pfn
+= level_size(level
);
1075 } while (!first_pte_in_page(++pte
) && pfn
<= last_pfn
);
1078 domain_flush_cache(domain
, first_pte
,
1079 (void *)++last_pte
- (void *)first_pte
);
1084 /* We can't just free the pages because the IOMMU may still be walking
1085 the page tables, and may have cached the intermediate levels. The
1086 pages can only be freed after the IOTLB flush has been done. */
1087 struct page
*domain_unmap(struct dmar_domain
*domain
,
1088 unsigned long start_pfn
,
1089 unsigned long last_pfn
)
1091 struct page
*freelist
= NULL
;
1093 BUG_ON(!domain_pfn_supported(domain
, start_pfn
));
1094 BUG_ON(!domain_pfn_supported(domain
, last_pfn
));
1095 BUG_ON(start_pfn
> last_pfn
);
1097 /* we don't need lock here; nobody else touches the iova range */
1098 freelist
= dma_pte_clear_level(domain
, agaw_to_level(domain
->agaw
),
1099 domain
->pgd
, 0, start_pfn
, last_pfn
, NULL
);
1102 if (start_pfn
== 0 && last_pfn
== DOMAIN_MAX_PFN(domain
->gaw
)) {
1103 struct page
*pgd_page
= virt_to_page(domain
->pgd
);
1104 pgd_page
->freelist
= freelist
;
1105 freelist
= pgd_page
;
1113 void dma_free_pagelist(struct page
*freelist
)
1117 while ((pg
= freelist
)) {
1118 freelist
= pg
->freelist
;
1119 free_pgtable_page(page_address(pg
));
1123 /* iommu handling */
1124 static int iommu_alloc_root_entry(struct intel_iommu
*iommu
)
1126 struct root_entry
*root
;
1127 unsigned long flags
;
1129 root
= (struct root_entry
*)alloc_pgtable_page(iommu
->node
);
1133 __iommu_flush_cache(iommu
, root
, ROOT_SIZE
);
1135 spin_lock_irqsave(&iommu
->lock
, flags
);
1136 iommu
->root_entry
= root
;
1137 spin_unlock_irqrestore(&iommu
->lock
, flags
);
1142 static void iommu_set_root_entry(struct intel_iommu
*iommu
)
1148 addr
= iommu
->root_entry
;
1150 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
1151 dmar_writeq(iommu
->reg
+ DMAR_RTADDR_REG
, virt_to_phys(addr
));
1153 writel(iommu
->gcmd
| DMA_GCMD_SRTP
, iommu
->reg
+ DMAR_GCMD_REG
);
1155 /* Make sure hardware complete it */
1156 IOMMU_WAIT_OP(iommu
, DMAR_GSTS_REG
,
1157 readl
, (sts
& DMA_GSTS_RTPS
), sts
);
1159 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
1162 static void iommu_flush_write_buffer(struct intel_iommu
*iommu
)
1167 if (!rwbf_quirk
&& !cap_rwbf(iommu
->cap
))
1170 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
1171 writel(iommu
->gcmd
| DMA_GCMD_WBF
, iommu
->reg
+ DMAR_GCMD_REG
);
1173 /* Make sure hardware complete it */
1174 IOMMU_WAIT_OP(iommu
, DMAR_GSTS_REG
,
1175 readl
, (!(val
& DMA_GSTS_WBFS
)), val
);
1177 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
/* return value determines if we need a write buffer flush */
1181 static void __iommu_flush_context(struct intel_iommu
*iommu
,
1182 u16 did
, u16 source_id
, u8 function_mask
,
1189 case DMA_CCMD_GLOBAL_INVL
:
1190 val
= DMA_CCMD_GLOBAL_INVL
;
1192 case DMA_CCMD_DOMAIN_INVL
:
1193 val
= DMA_CCMD_DOMAIN_INVL
|DMA_CCMD_DID(did
);
1195 case DMA_CCMD_DEVICE_INVL
:
1196 val
= DMA_CCMD_DEVICE_INVL
|DMA_CCMD_DID(did
)
1197 | DMA_CCMD_SID(source_id
) | DMA_CCMD_FM(function_mask
);
1202 val
|= DMA_CCMD_ICC
;
1204 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
1205 dmar_writeq(iommu
->reg
+ DMAR_CCMD_REG
, val
);
1207 /* Make sure hardware complete it */
1208 IOMMU_WAIT_OP(iommu
, DMAR_CCMD_REG
,
1209 dmar_readq
, (!(val
& DMA_CCMD_ICC
)), val
);
1211 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
/* return value determines if we need a write buffer flush */
1215 static void __iommu_flush_iotlb(struct intel_iommu
*iommu
, u16 did
,
1216 u64 addr
, unsigned int size_order
, u64 type
)
1218 int tlb_offset
= ecap_iotlb_offset(iommu
->ecap
);
1219 u64 val
= 0, val_iva
= 0;
1223 case DMA_TLB_GLOBAL_FLUSH
:
		/* global flush doesn't need to set IVA_REG */
1225 val
= DMA_TLB_GLOBAL_FLUSH
|DMA_TLB_IVT
;
1227 case DMA_TLB_DSI_FLUSH
:
1228 val
= DMA_TLB_DSI_FLUSH
|DMA_TLB_IVT
|DMA_TLB_DID(did
);
1230 case DMA_TLB_PSI_FLUSH
:
1231 val
= DMA_TLB_PSI_FLUSH
|DMA_TLB_IVT
|DMA_TLB_DID(did
);
1232 /* IH bit is passed in as part of address */
1233 val_iva
= size_order
| addr
;
1238 /* Note: set drain read/write */
1241 * This is probably to be super secure.. Looks like we can
1242 * ignore it without any impact.
1244 if (cap_read_drain(iommu
->cap
))
1245 val
|= DMA_TLB_READ_DRAIN
;
1247 if (cap_write_drain(iommu
->cap
))
1248 val
|= DMA_TLB_WRITE_DRAIN
;
1250 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
1251 /* Note: Only uses first TLB reg currently */
1253 dmar_writeq(iommu
->reg
+ tlb_offset
, val_iva
);
1254 dmar_writeq(iommu
->reg
+ tlb_offset
+ 8, val
);
1256 /* Make sure hardware complete it */
1257 IOMMU_WAIT_OP(iommu
, tlb_offset
+ 8,
1258 dmar_readq
, (!(val
& DMA_TLB_IVT
)), val
);
1260 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
1262 /* check IOTLB invalidation granularity */
1263 if (DMA_TLB_IAIG(val
) == 0)
1264 printk(KERN_ERR
"IOMMU: flush IOTLB failed\n");
1265 if (DMA_TLB_IAIG(val
) != DMA_TLB_IIRG(type
))
1266 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
1267 (unsigned long long)DMA_TLB_IIRG(type
),
1268 (unsigned long long)DMA_TLB_IAIG(val
));
1271 static struct device_domain_info
*
1272 iommu_support_dev_iotlb (struct dmar_domain
*domain
, struct intel_iommu
*iommu
,
1276 unsigned long flags
;
1277 struct device_domain_info
*info
;
1278 struct pci_dev
*pdev
;
1280 if (!ecap_dev_iotlb_support(iommu
->ecap
))
1286 spin_lock_irqsave(&device_domain_lock
, flags
);
1287 list_for_each_entry(info
, &domain
->devices
, link
)
1288 if (info
->iommu
== iommu
&& info
->bus
== bus
&&
1289 info
->devfn
== devfn
) {
1293 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1295 if (!found
|| !info
->dev
|| !dev_is_pci(info
->dev
))
1298 pdev
= to_pci_dev(info
->dev
);
1300 if (!pci_find_ext_capability(pdev
, PCI_EXT_CAP_ID_ATS
))
1303 if (!dmar_find_matched_atsr_unit(pdev
))
1309 static void iommu_enable_dev_iotlb(struct device_domain_info
*info
)
1311 if (!info
|| !dev_is_pci(info
->dev
))
1314 pci_enable_ats(to_pci_dev(info
->dev
), VTD_PAGE_SHIFT
);
1317 static void iommu_disable_dev_iotlb(struct device_domain_info
*info
)
1319 if (!info
->dev
|| !dev_is_pci(info
->dev
) ||
1320 !pci_ats_enabled(to_pci_dev(info
->dev
)))
1323 pci_disable_ats(to_pci_dev(info
->dev
));
1326 static void iommu_flush_dev_iotlb(struct dmar_domain
*domain
,
1327 u64 addr
, unsigned mask
)
1330 unsigned long flags
;
1331 struct device_domain_info
*info
;
1333 spin_lock_irqsave(&device_domain_lock
, flags
);
1334 list_for_each_entry(info
, &domain
->devices
, link
) {
1335 struct pci_dev
*pdev
;
1336 if (!info
->dev
|| !dev_is_pci(info
->dev
))
1339 pdev
= to_pci_dev(info
->dev
);
1340 if (!pci_ats_enabled(pdev
))
1343 sid
= info
->bus
<< 8 | info
->devfn
;
1344 qdep
= pci_ats_queue_depth(pdev
);
1345 qi_flush_dev_iotlb(info
->iommu
, sid
, qdep
, addr
, mask
);
1347 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1350 static void iommu_flush_iotlb_psi(struct intel_iommu
*iommu
, u16 did
,
1351 unsigned long pfn
, unsigned int pages
, int ih
, int map
)
1353 unsigned int mask
= ilog2(__roundup_pow_of_two(pages
));
1354 uint64_t addr
= (uint64_t)pfn
<< VTD_PAGE_SHIFT
;
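	/*
	 * Worked example (added note): for pages == 3 the round-up gives 4,
	 * so mask == 2 and the PSI covers four naturally aligned VT-d pages
	 * starting at addr; the hardware requires exactly this power-of-two,
	 * aligned shape.
	 */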
	/*
	 * Fallback to domain selective flush if no PSI support or the size
	 * is too big.
	 * PSI requires the page size to be 2 ^ x, and the base address to be
	 * naturally aligned to the size.
	 */
1366 if (!cap_pgsel_inv(iommu
->cap
) || mask
> cap_max_amask_val(iommu
->cap
))
1367 iommu
->flush
.flush_iotlb(iommu
, did
, 0, 0,
1370 iommu
->flush
.flush_iotlb(iommu
, did
, addr
| ih
, mask
,
1374 * In caching mode, changes of pages from non-present to present require
1375 * flush. However, device IOTLB doesn't need to be flushed in this case.
1377 if (!cap_caching_mode(iommu
->cap
) || !map
)
1378 iommu_flush_dev_iotlb(iommu
->domains
[did
], addr
, mask
);
1381 static void iommu_disable_protect_mem_regions(struct intel_iommu
*iommu
)
1384 unsigned long flags
;
1386 raw_spin_lock_irqsave(&iommu
->register_lock
, flags
);
1387 pmen
= readl(iommu
->reg
+ DMAR_PMEN_REG
);
1388 pmen
&= ~DMA_PMEN_EPM
;
1389 writel(pmen
, iommu
->reg
+ DMAR_PMEN_REG
);
1391 /* wait for the protected region status bit to clear */
1392 IOMMU_WAIT_OP(iommu
, DMAR_PMEN_REG
,
1393 readl
, !(pmen
& DMA_PMEN_PRS
), pmen
);
1395 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flags
);
1398 static void iommu_enable_translation(struct intel_iommu
*iommu
)
1401 unsigned long flags
;
1403 raw_spin_lock_irqsave(&iommu
->register_lock
, flags
);
1404 iommu
->gcmd
|= DMA_GCMD_TE
;
1405 writel(iommu
->gcmd
, iommu
->reg
+ DMAR_GCMD_REG
);
1407 /* Make sure hardware complete it */
1408 IOMMU_WAIT_OP(iommu
, DMAR_GSTS_REG
,
1409 readl
, (sts
& DMA_GSTS_TES
), sts
);
1411 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flags
);
1414 static void iommu_disable_translation(struct intel_iommu
*iommu
)
1419 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
1420 iommu
->gcmd
&= ~DMA_GCMD_TE
;
1421 writel(iommu
->gcmd
, iommu
->reg
+ DMAR_GCMD_REG
);
1423 /* Make sure hardware complete it */
1424 IOMMU_WAIT_OP(iommu
, DMAR_GSTS_REG
,
1425 readl
, (!(sts
& DMA_GSTS_TES
)), sts
);
1427 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
1431 static int iommu_init_domains(struct intel_iommu
*iommu
)
1433 unsigned long ndomains
;
1434 unsigned long nlongs
;
1436 ndomains
= cap_ndoms(iommu
->cap
);
1437 pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1438 iommu
->seq_id
, ndomains
);
1439 nlongs
= BITS_TO_LONGS(ndomains
);
1441 spin_lock_init(&iommu
->lock
);
1443 /* TBD: there might be 64K domains,
1444 * consider other allocation for future chip
1446 iommu
->domain_ids
= kcalloc(nlongs
, sizeof(unsigned long), GFP_KERNEL
);
1447 if (!iommu
->domain_ids
) {
1448 pr_err("IOMMU%d: allocating domain id array failed\n",
1452 iommu
->domains
= kcalloc(ndomains
, sizeof(struct dmar_domain
*),
1454 if (!iommu
->domains
) {
1455 pr_err("IOMMU%d: allocating domain array failed\n",
1457 kfree(iommu
->domain_ids
);
1458 iommu
->domain_ids
= NULL
;
1463 * if Caching mode is set, then invalid translations are tagged
1464 * with domainid 0. Hence we need to pre-allocate it.
1466 if (cap_caching_mode(iommu
->cap
))
1467 set_bit(0, iommu
->domain_ids
);
1471 static void free_dmar_iommu(struct intel_iommu
*iommu
)
1473 struct dmar_domain
*domain
;
1476 if ((iommu
->domains
) && (iommu
->domain_ids
)) {
1477 for_each_set_bit(i
, iommu
->domain_ids
, cap_ndoms(iommu
->cap
)) {
1479 * Domain id 0 is reserved for invalid translation
1480 * if hardware supports caching mode.
1482 if (cap_caching_mode(iommu
->cap
) && i
== 0)
1485 domain
= iommu
->domains
[i
];
1486 clear_bit(i
, iommu
->domain_ids
);
1487 if (domain_detach_iommu(domain
, iommu
) == 0 &&
1488 !domain_type_is_vm(domain
))
1489 domain_exit(domain
);
1493 if (iommu
->gcmd
& DMA_GCMD_TE
)
1494 iommu_disable_translation(iommu
);
1496 kfree(iommu
->domains
);
1497 kfree(iommu
->domain_ids
);
1498 iommu
->domains
= NULL
;
1499 iommu
->domain_ids
= NULL
;
1501 g_iommus
[iommu
->seq_id
] = NULL
;
1503 /* free context mapping */
1504 free_context_table(iommu
);
1507 static struct dmar_domain
*alloc_domain(int flags
)
1509 /* domain id for virtual machine, it won't be set in context */
1510 static atomic_t vm_domid
= ATOMIC_INIT(0);
1511 struct dmar_domain
*domain
;
1513 domain
= alloc_domain_mem();
1517 memset(domain
, 0, sizeof(*domain
));
1519 domain
->flags
= flags
;
1520 spin_lock_init(&domain
->iommu_lock
);
1521 INIT_LIST_HEAD(&domain
->devices
);
1522 if (flags
& DOMAIN_FLAG_VIRTUAL_MACHINE
)
1523 domain
->id
= atomic_inc_return(&vm_domid
);
1528 static int __iommu_attach_domain(struct dmar_domain
*domain
,
1529 struct intel_iommu
*iommu
)
1532 unsigned long ndomains
;
1534 ndomains
= cap_ndoms(iommu
->cap
);
1535 num
= find_first_zero_bit(iommu
->domain_ids
, ndomains
);
1536 if (num
< ndomains
) {
1537 set_bit(num
, iommu
->domain_ids
);
1538 iommu
->domains
[num
] = domain
;
1546 static int iommu_attach_domain(struct dmar_domain
*domain
,
1547 struct intel_iommu
*iommu
)
1550 unsigned long flags
;
1552 spin_lock_irqsave(&iommu
->lock
, flags
);
1553 num
= __iommu_attach_domain(domain
, iommu
);
1554 spin_unlock_irqrestore(&iommu
->lock
, flags
);
1556 pr_err("IOMMU: no free domain ids\n");
1561 static int iommu_attach_vm_domain(struct dmar_domain
*domain
,
1562 struct intel_iommu
*iommu
)
1565 unsigned long ndomains
;
1567 ndomains
= cap_ndoms(iommu
->cap
);
1568 for_each_set_bit(num
, iommu
->domain_ids
, ndomains
)
1569 if (iommu
->domains
[num
] == domain
)
1572 return __iommu_attach_domain(domain
, iommu
);
1575 static void iommu_detach_domain(struct dmar_domain
*domain
,
1576 struct intel_iommu
*iommu
)
1578 unsigned long flags
;
1581 spin_lock_irqsave(&iommu
->lock
, flags
);
1582 if (domain_type_is_vm_or_si(domain
)) {
1583 ndomains
= cap_ndoms(iommu
->cap
);
1584 for_each_set_bit(num
, iommu
->domain_ids
, ndomains
) {
1585 if (iommu
->domains
[num
] == domain
) {
1586 clear_bit(num
, iommu
->domain_ids
);
1587 iommu
->domains
[num
] = NULL
;
1592 clear_bit(domain
->id
, iommu
->domain_ids
);
1593 iommu
->domains
[domain
->id
] = NULL
;
1595 spin_unlock_irqrestore(&iommu
->lock
, flags
);
1598 static void domain_attach_iommu(struct dmar_domain
*domain
,
1599 struct intel_iommu
*iommu
)
1601 unsigned long flags
;
1603 spin_lock_irqsave(&domain
->iommu_lock
, flags
);
1604 if (!test_and_set_bit(iommu
->seq_id
, domain
->iommu_bmp
)) {
1605 domain
->iommu_count
++;
1606 if (domain
->iommu_count
== 1)
1607 domain
->nid
= iommu
->node
;
1608 domain_update_iommu_cap(domain
);
1610 spin_unlock_irqrestore(&domain
->iommu_lock
, flags
);
1613 static int domain_detach_iommu(struct dmar_domain
*domain
,
1614 struct intel_iommu
*iommu
)
1616 unsigned long flags
;
1617 int count
= INT_MAX
;
1619 spin_lock_irqsave(&domain
->iommu_lock
, flags
);
1620 if (test_and_clear_bit(iommu
->seq_id
, domain
->iommu_bmp
)) {
1621 count
= --domain
->iommu_count
;
1622 domain_update_iommu_cap(domain
);
1624 spin_unlock_irqrestore(&domain
->iommu_lock
, flags
);
1629 static struct iova_domain reserved_iova_list
;
1630 static struct lock_class_key reserved_rbtree_key
;
1632 static int dmar_init_reserved_ranges(void)
1634 struct pci_dev
*pdev
= NULL
;
1638 init_iova_domain(&reserved_iova_list
, DMA_32BIT_PFN
);
1640 lockdep_set_class(&reserved_iova_list
.iova_rbtree_lock
,
1641 &reserved_rbtree_key
);
1643 /* IOAPIC ranges shouldn't be accessed by DMA */
1644 iova
= reserve_iova(&reserved_iova_list
, IOVA_PFN(IOAPIC_RANGE_START
),
1645 IOVA_PFN(IOAPIC_RANGE_END
));
1647 printk(KERN_ERR
"Reserve IOAPIC range failed\n");
1651 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1652 for_each_pci_dev(pdev
) {
1655 for (i
= 0; i
< PCI_NUM_RESOURCES
; i
++) {
1656 r
= &pdev
->resource
[i
];
1657 if (!r
->flags
|| !(r
->flags
& IORESOURCE_MEM
))
1659 iova
= reserve_iova(&reserved_iova_list
,
1663 printk(KERN_ERR
"Reserve iova failed\n");
1671 static void domain_reserve_special_ranges(struct dmar_domain
*domain
)
1673 copy_reserved_iova(&reserved_iova_list
, &domain
->iovad
);
1676 static inline int guestwidth_to_adjustwidth(int gaw
)
1679 int r
= (gaw
- 12) % 9;
1690 static int domain_init(struct dmar_domain
*domain
, int guest_width
)
1692 struct intel_iommu
*iommu
;
1693 int adjust_width
, agaw
;
1694 unsigned long sagaw
;
1696 init_iova_domain(&domain
->iovad
, DMA_32BIT_PFN
);
1697 domain_reserve_special_ranges(domain
);
1699 /* calculate AGAW */
1700 iommu
= domain_get_iommu(domain
);
1701 if (guest_width
> cap_mgaw(iommu
->cap
))
1702 guest_width
= cap_mgaw(iommu
->cap
);
1703 domain
->gaw
= guest_width
;
1704 adjust_width
= guestwidth_to_adjustwidth(guest_width
);
1705 agaw
= width_to_agaw(adjust_width
);
1706 sagaw
= cap_sagaw(iommu
->cap
);
1707 if (!test_bit(agaw
, &sagaw
)) {
1708 /* hardware doesn't support it, choose a bigger one */
1709 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw
);
1710 agaw
= find_next_bit(&sagaw
, 5, agaw
);
1714 domain
->agaw
= agaw
;
1716 if (ecap_coherent(iommu
->ecap
))
1717 domain
->iommu_coherency
= 1;
1719 domain
->iommu_coherency
= 0;
1721 if (ecap_sc_support(iommu
->ecap
))
1722 domain
->iommu_snooping
= 1;
1724 domain
->iommu_snooping
= 0;
1726 if (intel_iommu_superpage
)
1727 domain
->iommu_superpage
= fls(cap_super_page_val(iommu
->cap
));
1729 domain
->iommu_superpage
= 0;
1731 domain
->nid
= iommu
->node
;
1733 /* always allocate the top pgd */
1734 domain
->pgd
= (struct dma_pte
*)alloc_pgtable_page(domain
->nid
);
1737 __iommu_flush_cache(iommu
, domain
->pgd
, PAGE_SIZE
);
1741 static void domain_exit(struct dmar_domain
*domain
)
1743 struct dmar_drhd_unit
*drhd
;
1744 struct intel_iommu
*iommu
;
1745 struct page
*freelist
= NULL
;
	/* Domain 0 is reserved, so don't process it */
1751 /* Flush any lazy unmaps that may reference this domain */
1752 if (!intel_iommu_strict
)
1753 flush_unmaps_timeout(0);
1755 /* remove associated devices */
1756 domain_remove_dev_info(domain
);
1759 put_iova_domain(&domain
->iovad
);
1761 freelist
= domain_unmap(domain
, 0, DOMAIN_MAX_PFN(domain
->gaw
));
1763 /* clear attached or cached domains */
1765 for_each_active_iommu(iommu
, drhd
)
1766 iommu_detach_domain(domain
, iommu
);
1769 dma_free_pagelist(freelist
);
1771 free_domain_mem(domain
);
1774 static int domain_context_mapping_one(struct dmar_domain
*domain
,
1775 struct intel_iommu
*iommu
,
1776 u8 bus
, u8 devfn
, int translation
)
1778 struct context_entry
*context
;
1779 unsigned long flags
;
1780 struct dma_pte
*pgd
;
1783 struct device_domain_info
*info
= NULL
;
1785 pr_debug("Set context mapping for %02x:%02x.%d\n",
1786 bus
, PCI_SLOT(devfn
), PCI_FUNC(devfn
));
1788 BUG_ON(!domain
->pgd
);
1789 BUG_ON(translation
!= CONTEXT_TT_PASS_THROUGH
&&
1790 translation
!= CONTEXT_TT_MULTI_LEVEL
);
1792 context
= device_to_context_entry(iommu
, bus
, devfn
);
1795 spin_lock_irqsave(&iommu
->lock
, flags
);
1796 if (context_present(context
)) {
1797 spin_unlock_irqrestore(&iommu
->lock
, flags
);
1804 if (domain_type_is_vm_or_si(domain
)) {
1805 if (domain_type_is_vm(domain
)) {
1806 id
= iommu_attach_vm_domain(domain
, iommu
);
1808 spin_unlock_irqrestore(&iommu
->lock
, flags
);
1809 pr_err("IOMMU: no free domain ids\n");
1814 /* Skip top levels of page tables for
1815 * iommu which has less agaw than default.
1816 * Unnecessary for PT mode.
1818 if (translation
!= CONTEXT_TT_PASS_THROUGH
) {
1819 for (agaw
= domain
->agaw
; agaw
!= iommu
->agaw
; agaw
--) {
1820 pgd
= phys_to_virt(dma_pte_addr(pgd
));
1821 if (!dma_pte_present(pgd
)) {
1822 spin_unlock_irqrestore(&iommu
->lock
, flags
);
1829 context_set_domain_id(context
, id
);
1831 if (translation
!= CONTEXT_TT_PASS_THROUGH
) {
1832 info
= iommu_support_dev_iotlb(domain
, iommu
, bus
, devfn
);
1833 translation
= info
? CONTEXT_TT_DEV_IOTLB
:
1834 CONTEXT_TT_MULTI_LEVEL
;
1837 * In pass through mode, AW must be programmed to indicate the largest
1838 * AGAW value supported by hardware. And ASR is ignored by hardware.
1840 if (unlikely(translation
== CONTEXT_TT_PASS_THROUGH
))
1841 context_set_address_width(context
, iommu
->msagaw
);
1843 context_set_address_root(context
, virt_to_phys(pgd
));
1844 context_set_address_width(context
, iommu
->agaw
);
1847 context_set_translation_type(context
, translation
);
1848 context_set_fault_enable(context
);
1849 context_set_present(context
);
1850 domain_flush_cache(domain
, context
, sizeof(*context
));
	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
1858 if (cap_caching_mode(iommu
->cap
)) {
1859 iommu
->flush
.flush_context(iommu
, 0,
1860 (((u16
)bus
) << 8) | devfn
,
1861 DMA_CCMD_MASK_NOBIT
,
1862 DMA_CCMD_DEVICE_INVL
);
1863 iommu
->flush
.flush_iotlb(iommu
, id
, 0, 0, DMA_TLB_DSI_FLUSH
);
1865 iommu_flush_write_buffer(iommu
);
1867 iommu_enable_dev_iotlb(info
);
1868 spin_unlock_irqrestore(&iommu
->lock
, flags
);
1870 domain_attach_iommu(domain
, iommu
);
1875 struct domain_context_mapping_data
{
1876 struct dmar_domain
*domain
;
1877 struct intel_iommu
*iommu
;
1881 static int domain_context_mapping_cb(struct pci_dev
*pdev
,
1882 u16 alias
, void *opaque
)
1884 struct domain_context_mapping_data
*data
= opaque
;
1886 return domain_context_mapping_one(data
->domain
, data
->iommu
,
1887 PCI_BUS_NUM(alias
), alias
& 0xff,
1892 domain_context_mapping(struct dmar_domain
*domain
, struct device
*dev
,
1895 struct intel_iommu
*iommu
;
1897 struct domain_context_mapping_data data
;
1899 iommu
= device_to_iommu(dev
, &bus
, &devfn
);
1903 if (!dev_is_pci(dev
))
1904 return domain_context_mapping_one(domain
, iommu
, bus
, devfn
,
1907 data
.domain
= domain
;
1909 data
.translation
= translation
;
1911 return pci_for_each_dma_alias(to_pci_dev(dev
),
1912 &domain_context_mapping_cb
, &data
);
1915 static int domain_context_mapped_cb(struct pci_dev
*pdev
,
1916 u16 alias
, void *opaque
)
1918 struct intel_iommu
*iommu
= opaque
;
1920 return !device_context_mapped(iommu
, PCI_BUS_NUM(alias
), alias
& 0xff);
1923 static int domain_context_mapped(struct device
*dev
)
1925 struct intel_iommu
*iommu
;
1928 iommu
= device_to_iommu(dev
, &bus
, &devfn
);
1932 if (!dev_is_pci(dev
))
1933 return device_context_mapped(iommu
, bus
, devfn
);
1935 return !pci_for_each_dma_alias(to_pci_dev(dev
),
1936 domain_context_mapped_cb
, iommu
);
1939 /* Returns a number of VTD pages, but aligned to MM page size */
1940 static inline unsigned long aligned_nrpages(unsigned long host_addr
,
1943 host_addr
&= ~PAGE_MASK
;
1944 return PAGE_ALIGN(host_addr
+ size
) >> VTD_PAGE_SHIFT
;
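/*
 * Worked example (added note): with 4KiB pages, host_addr = 0x1234 and
 * size = 0x2000 gives an in-page offset of 0x234; PAGE_ALIGN(0x2234) =
 * 0x3000, i.e. three 4KiB VT-d pages are needed to cover the buffer.
 */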
1947 /* Return largest possible superpage level for a given mapping */
1948 static inline int hardware_largepage_caps(struct dmar_domain
*domain
,
1949 unsigned long iov_pfn
,
1950 unsigned long phy_pfn
,
1951 unsigned long pages
)
1953 int support
, level
= 1;
1954 unsigned long pfnmerge
;
1956 support
= domain
->iommu_superpage
;
1958 /* To use a large page, the virtual *and* physical addresses
1959 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1960 of them will mean we have to use smaller pages. So just
1961 merge them and check both at once. */
1962 pfnmerge
= iov_pfn
| phy_pfn
;
1964 while (support
&& !(pfnmerge
& ~VTD_STRIDE_MASK
)) {
1965 pages
>>= VTD_STRIDE_SHIFT
;
1968 pfnmerge
>>= VTD_STRIDE_SHIFT
;
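		/*
		 * Added note: each iteration of this loop asks whether the
		 * low 9 bits of (iov_pfn | phy_pfn) for the next stride are
		 * clear, i.e. whether both addresses are 2MiB-aligned
		 * (level 2), then 1GiB-aligned (level 3), and so on, and
		 * whether enough pages remain to fill such a superpage.
		 */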
1975 static int __domain_mapping(struct dmar_domain
*domain
, unsigned long iov_pfn
,
1976 struct scatterlist
*sg
, unsigned long phys_pfn
,
1977 unsigned long nr_pages
, int prot
)
1979 struct dma_pte
*first_pte
= NULL
, *pte
= NULL
;
1980 phys_addr_t
uninitialized_var(pteval
);
1981 unsigned long sg_res
;
1982 unsigned int largepage_lvl
= 0;
1983 unsigned long lvl_pages
= 0;
1985 BUG_ON(!domain_pfn_supported(domain
, iov_pfn
+ nr_pages
- 1));
1987 if ((prot
& (DMA_PTE_READ
|DMA_PTE_WRITE
)) == 0)
1990 prot
&= DMA_PTE_READ
| DMA_PTE_WRITE
| DMA_PTE_SNP
;
1995 sg_res
= nr_pages
+ 1;
1996 pteval
= ((phys_addr_t
)phys_pfn
<< VTD_PAGE_SHIFT
) | prot
;
1999 while (nr_pages
> 0) {
2003 sg_res
= aligned_nrpages(sg
->offset
, sg
->length
);
2004 sg
->dma_address
= ((dma_addr_t
)iov_pfn
<< VTD_PAGE_SHIFT
) + sg
->offset
;
2005 sg
->dma_length
= sg
->length
;
2006 pteval
= page_to_phys(sg_page(sg
)) | prot
;
2007 phys_pfn
= pteval
>> VTD_PAGE_SHIFT
;
2011 largepage_lvl
= hardware_largepage_caps(domain
, iov_pfn
, phys_pfn
, sg_res
);
2013 first_pte
= pte
= pfn_to_dma_pte(domain
, iov_pfn
, &largepage_lvl
);
2016 /* It is large page*/
2017 if (largepage_lvl
> 1) {
2018 pteval
|= DMA_PTE_LARGE_PAGE
;
2019 lvl_pages
= lvl_to_nr_pages(largepage_lvl
);
2021 * Ensure that old small page tables are
2022 * removed to make room for superpage,
2025 dma_pte_free_pagetable(domain
, iov_pfn
,
2026 iov_pfn
+ lvl_pages
- 1);
2028 pteval
&= ~(uint64_t)DMA_PTE_LARGE_PAGE
;
2032 /* We don't need lock here, nobody else
2033 * touches the iova range
2035 tmp
= cmpxchg64_local(&pte
->val
, 0ULL, pteval
);
2037 static int dumps
= 5;
2038 printk(KERN_CRIT
"ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2039 iov_pfn
, tmp
, (unsigned long long)pteval
);
2042 debug_dma_dump_mappings(NULL
);
2047 lvl_pages
= lvl_to_nr_pages(largepage_lvl
);
2049 BUG_ON(nr_pages
< lvl_pages
);
2050 BUG_ON(sg_res
< lvl_pages
);
2052 nr_pages
-= lvl_pages
;
2053 iov_pfn
+= lvl_pages
;
2054 phys_pfn
+= lvl_pages
;
2055 pteval
+= lvl_pages
* VTD_PAGE_SIZE
;
2056 sg_res
-= lvl_pages
;
2058 /* If the next PTE would be the first in a new page, then we
2059 need to flush the cache on the entries we've just written.
2060 And then we'll need to recalculate 'pte', so clear it and
2061 let it get set again in the if (!pte) block above.
2063 If we're done (!nr_pages) we need to flush the cache too.
2065 Also if we've been setting superpages, we may need to
2066 recalculate 'pte' and switch back to smaller pages for the
2067 end of the mapping, if the trailing size is not enough to
2068 use another superpage (i.e. sg_res < lvl_pages). */
2070 if (!nr_pages
|| first_pte_in_page(pte
) ||
2071 (largepage_lvl
> 1 && sg_res
< lvl_pages
)) {
2072 domain_flush_cache(domain
, first_pte
,
2073 (void *)pte
- (void *)first_pte
);
2077 if (!sg_res
&& nr_pages
)
2083 static inline int domain_sg_mapping(struct dmar_domain
*domain
, unsigned long iov_pfn
,
2084 struct scatterlist
*sg
, unsigned long nr_pages
,
2087 return __domain_mapping(domain
, iov_pfn
, sg
, 0, nr_pages
, prot
);
2090 static inline int domain_pfn_mapping(struct dmar_domain
*domain
, unsigned long iov_pfn
,
2091 unsigned long phys_pfn
, unsigned long nr_pages
,
2094 return __domain_mapping(domain
, iov_pfn
, NULL
, phys_pfn
, nr_pages
, prot
);
2097 static void iommu_detach_dev(struct intel_iommu
*iommu
, u8 bus
, u8 devfn
)
2102 clear_context_table(iommu
, bus
, devfn
);
2103 iommu
->flush
.flush_context(iommu
, 0, 0, 0,
2104 DMA_CCMD_GLOBAL_INVL
);
2105 iommu
->flush
.flush_iotlb(iommu
, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH
);
2108 static inline void unlink_domain_info(struct device_domain_info
*info
)
2110 assert_spin_locked(&device_domain_lock
);
2111 list_del(&info
->link
);
2112 list_del(&info
->global
);
2114 info
->dev
->archdata
.iommu
= NULL
;
2117 static void domain_remove_dev_info(struct dmar_domain
*domain
)
2119 struct device_domain_info
*info
, *tmp
;
2120 unsigned long flags
;
2122 spin_lock_irqsave(&device_domain_lock
, flags
);
2123 list_for_each_entry_safe(info
, tmp
, &domain
->devices
, link
) {
2124 unlink_domain_info(info
);
2125 spin_unlock_irqrestore(&device_domain_lock
, flags
);
2127 iommu_disable_dev_iotlb(info
);
2128 iommu_detach_dev(info
->iommu
, info
->bus
, info
->devfn
);
2130 if (domain_type_is_vm(domain
)) {
2131 iommu_detach_dependent_devices(info
->iommu
, info
->dev
);
2132 domain_detach_iommu(domain
, info
->iommu
);
2135 free_devinfo_mem(info
);
2136 spin_lock_irqsave(&device_domain_lock
, flags
);
2138 spin_unlock_irqrestore(&device_domain_lock
, flags
);
/*
 * Note: struct device->archdata.iommu stores the device_domain_info for
 * each device.
 */
2145 static struct dmar_domain
*find_domain(struct device
*dev
)
2147 struct device_domain_info
*info
;
2149 /* No lock here, assumes no domain exit in normal case */
2150 info
= dev
->archdata
.iommu
;
2152 return info
->domain
;
2156 static inline struct device_domain_info
*
2157 dmar_search_domain_by_dev_info(int segment
, int bus
, int devfn
)
2159 struct device_domain_info
*info
;
2161 list_for_each_entry(info
, &device_domain_list
, global
)
2162 if (info
->iommu
->segment
== segment
&& info
->bus
== bus
&&
2163 info
->devfn
== devfn
)
2169 static struct dmar_domain
*dmar_insert_dev_info(struct intel_iommu
*iommu
,
2172 struct dmar_domain
*domain
)
2174 struct dmar_domain
*found
= NULL
;
2175 struct device_domain_info
*info
;
2176 unsigned long flags
;
2178 info
= alloc_devinfo_mem();
2183 info
->devfn
= devfn
;
2185 info
->domain
= domain
;
2186 info
->iommu
= iommu
;
2188 spin_lock_irqsave(&device_domain_lock
, flags
);
2190 found
= find_domain(dev
);
2192 struct device_domain_info
*info2
;
2193 info2
= dmar_search_domain_by_dev_info(iommu
->segment
, bus
, devfn
);
2195 found
= info2
->domain
;
2198 spin_unlock_irqrestore(&device_domain_lock
, flags
);
2199 free_devinfo_mem(info
);
2200 /* Caller must free the original domain */
2204 list_add(&info
->link
, &domain
->devices
);
2205 list_add(&info
->global
, &device_domain_list
);
2207 dev
->archdata
.iommu
= info
;
2208 spin_unlock_irqrestore(&device_domain_lock
, flags
);
2213 static int get_last_alias(struct pci_dev
*pdev
, u16 alias
, void *opaque
)
2215 *(u16
*)opaque
= alias
;
2219 /* domain is initialized */
2220 static struct dmar_domain
*get_domain_for_dev(struct device
*dev
, int gaw
)
2222 struct dmar_domain
*domain
, *tmp
;
2223 struct intel_iommu
*iommu
;
2224 struct device_domain_info
*info
;
2226 unsigned long flags
;
2229 domain
= find_domain(dev
);
2233 iommu
= device_to_iommu(dev
, &bus
, &devfn
);
2237 if (dev_is_pci(dev
)) {
2238 struct pci_dev
*pdev
= to_pci_dev(dev
);
2240 pci_for_each_dma_alias(pdev
, get_last_alias
, &dma_alias
);
2242 spin_lock_irqsave(&device_domain_lock
, flags
);
2243 info
= dmar_search_domain_by_dev_info(pci_domain_nr(pdev
->bus
),
2244 PCI_BUS_NUM(dma_alias
),
2247 iommu
= info
->iommu
;
2248 domain
= info
->domain
;
2250 spin_unlock_irqrestore(&device_domain_lock
, flags
);
2252 /* DMA alias already has a domain, uses it */
2257 /* Allocate and initialize new domain for the device */
2258 domain
= alloc_domain(0);
2261 domain
->id
= iommu_attach_domain(domain
, iommu
);
2262 if (domain
->id
< 0) {
2263 free_domain_mem(domain
);
2266 domain_attach_iommu(domain
, iommu
);
2267 if (domain_init(domain
, gaw
)) {
2268 domain_exit(domain
);
2272 /* register PCI DMA alias device */
2273 if (dev_is_pci(dev
)) {
2274 tmp
= dmar_insert_dev_info(iommu
, PCI_BUS_NUM(dma_alias
),
2275 dma_alias
& 0xff, NULL
, domain
);
2277 if (!tmp
|| tmp
!= domain
) {
2278 domain_exit(domain
);
2287 tmp
= dmar_insert_dev_info(iommu
, bus
, devfn
, dev
, domain
);
2289 if (!tmp
|| tmp
!= domain
) {
2290 domain_exit(domain
);
2297 static int iommu_identity_mapping
;
2298 #define IDENTMAP_ALL 1
2299 #define IDENTMAP_GFX 2
2300 #define IDENTMAP_AZALIA 4
static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}
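/*
 * Worked example (illustrative values only): identity-mapping an RMRR of
 * 0x000e8000..0x000e8fff gives first_vpfn = last_vpfn = 0xe8, so exactly
 * one 4KiB VT-d page is reserved in the domain's iovad and mapped 1:1
 * with read/write permission.
 */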
static int iommu_prepare_identity_map(struct device *dev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       dev_name(dev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       dev_name(dev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct device *dev)
{
	if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;

	return iommu_prepare_identity_map(dev, rmrr->base_address,
					  rmrr->end_address);
}
#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");

	pci_dev_put(pdev);
}
#else
static inline void iommu_prepare_isa(void)
{
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);
static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;
	bool first = true;

	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
	if (!si_domain)
		return -EFAULT;

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0) {
			domain_exit(si_domain);
			return -EFAULT;
		} else if (first) {
			si_domain->id = ret;
			first = false;
		} else if (si_domain->id != ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
		domain_attach_iommu(si_domain, iommu);
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	pr_debug("IOMMU: identity mapping domain is domain %d\n",
		 si_domain->id);

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}
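/*
 * Note on the result: for software passthrough (hw == 0) the si_domain ends
 * up with a 1:1 mapping of every usable RAM range reported by memblock, so
 * bus addresses equal physical addresses for any device later attached to
 * it; for hardware passthrough the page table is never consulted and the
 * per-node mapping loop above is skipped.
 */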
static int identity_mapping(struct device *dev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = dev->archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}
static int domain_add_dev_info(struct dmar_domain *domain,
			       struct device *dev, int translation)
{
	struct dmar_domain *ndomain;
	struct intel_iommu *iommu;
	u8 bus, devfn;
	int ret;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
	if (ndomain != domain)
		return -EBUSY;

	ret = domain_context_mapping(domain, dev, translation);
	if (ret) {
		domain_remove_one_dev_info(domain, dev);
		return ret;
	}

	return 0;
}
static bool device_has_rmrr(struct device *dev)
{
	struct dmar_rmrr_unit *rmrr;
	struct device *tmp;
	int i;

	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		/*
		 * Return TRUE if this RMRR contains the device that
		 * is passed in.
		 */
		for_each_active_dev_scope(rmrr->devices,
					  rmrr->devices_cnt, i, tmp)
			if (tmp == dev) {
				rcu_read_unlock();
				return true;
			}
	}
	rcu_read_unlock();
	return false;
}
/*
 * There are a couple cases where we need to restrict the functionality of
 * devices associated with RMRRs.  The first is when evaluating a device for
 * identity mapping because problems exist when devices are moved in and out
 * of domains and their respective RMRR information is lost.  This means that
 * a device with associated RMRRs will never be in a "passthrough" domain.
 * The second is use of the device through the IOMMU API.  This interface
 * expects to have full control of the IOVA space for the device.  We cannot
 * satisfy both the requirement that RMRR access is maintained and have an
 * unencumbered IOVA space.  We also have no ability to quiesce the device's
 * use of the RMRR space or even inform the IOMMU API user of the restriction.
 * We therefore prevent devices associated with an RMRR from participating in
 * the IOMMU API, which eliminates them from device assignment.
 *
 * In both cases we assume that PCI USB devices with RMRRs have them largely
 * for historical reasons and that the RMRR space is not actively used post
 * boot.  This exclusion may change if vendors begin to abuse it.
 */
static bool device_is_rmrr_locked(struct device *dev)
{
	if (!device_has_rmrr(dev))
		return false;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
			return false;
	}

	return true;
}
static int iommu_should_identity_map(struct device *dev, int startup)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (device_is_rmrr_locked(dev))
			return 0;

		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
			return 1;

		if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
			return 1;

		if (!(iommu_identity_mapping & IDENTMAP_ALL))
			return 0;

		/*
		 * We want to start off with all devices in the 1:1 domain, and
		 * take them out later if we find they can't access all of memory.
		 *
		 * However, we can't do this for PCI devices behind bridges,
		 * because all PCI devices behind the same bridge will end up
		 * with the same source-id on their transactions.
		 *
		 * Practically speaking, we can't change things around for these
		 * devices at run-time, because we can't be sure there'll be no
		 * DMA transactions in flight for any of their siblings.
		 *
		 * So PCI devices (unless they're on the root bus) as well as
		 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
		 * the 1:1 domain, just in _case_ one of their siblings turns out
		 * not to be able to map all of memory.
		 */
		if (!pci_is_pcie(pdev)) {
			if (!pci_is_root_bus(pdev->bus))
				return 0;
			if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
				return 0;
		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
	} else {
		if (device_has_rmrr(dev))
			return 0;
	}

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = *dev->dma_mask;

		if (dev->coherent_dma_mask &&
		    dev->coherent_dma_mask < dma_mask)
			dma_mask = dev->coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(dev);
	}

	return 1;
}
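/*
 * Worked example for the !startup path (hypothetical device): on a host
 * with 8GiB of populated RAM dma_get_required_mask() is roughly
 * DMA_BIT_MASK(33).  A device advertising only a 32-bit dma_mask then
 * fails the comparison above and is kept out of (or later removed from)
 * the 1:1 domain, while a device with DMA_BIT_MASK(64) stays identity
 * mapped.
 */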
static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
{
	int ret;

	if (!iommu_should_identity_map(dev, 1))
		return 0;

	ret = domain_add_dev_info(si_domain, dev,
				  hw ? CONTEXT_TT_PASS_THROUGH :
				       CONTEXT_TT_MULTI_LEVEL);
	if (!ret)
		pr_info("IOMMU: %s identity mapping for device %s\n",
			hw ? "hardware" : "software", dev_name(dev));
	else if (ret == -ENODEV)
		/* device not associated with an iommu */
		ret = 0;

	return ret;
}
static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct device *dev;
	int i;
	int ret = 0;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
		if (ret)
			return ret;
	}

	for_each_active_iommu(iommu, drhd)
		for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
			struct acpi_device_physical_node *pn;
			struct acpi_device *adev;

			if (dev->bus != &acpi_bus_type)
				continue;

			adev = to_acpi_device(dev);
			mutex_lock(&adev->physical_node_lock);
			list_for_each_entry(pn, &adev->physical_node_list, node) {
				ret = dev_prepare_static_identity_mapping(pn->dev, hw);
				if (ret)
					break;
			}
			mutex_unlock(&adev->physical_node_lock);
			if (ret)
				return ret;
		}

	return 0;
}
2706 static int __init
init_dmars(void)
2708 struct dmar_drhd_unit
*drhd
;
2709 struct dmar_rmrr_unit
*rmrr
;
2711 struct intel_iommu
*iommu
;
2717 * initialize and program root entry to not present
2720 for_each_drhd_unit(drhd
) {
2722 * lock not needed as this is only incremented in the single
2723 * threaded kernel __init code path all other access are read
2726 if (g_num_of_iommus
< DMAR_UNITS_SUPPORTED
) {
2730 printk_once(KERN_ERR
"intel-iommu: exceeded %d IOMMUs\n",
2731 DMAR_UNITS_SUPPORTED
);
2734 g_iommus
= kcalloc(g_num_of_iommus
, sizeof(struct intel_iommu
*),
2737 printk(KERN_ERR
"Allocating global iommu array failed\n");
2742 deferred_flush
= kzalloc(g_num_of_iommus
*
2743 sizeof(struct deferred_flush_tables
), GFP_KERNEL
);
2744 if (!deferred_flush
) {
2749 for_each_active_iommu(iommu
, drhd
) {
2750 g_iommus
[iommu
->seq_id
] = iommu
;
2752 ret
= iommu_init_domains(iommu
);
2758 * we could share the same root & context tables
2759 * among all IOMMU's. Need to Split it later.
2761 ret
= iommu_alloc_root_entry(iommu
);
2763 printk(KERN_ERR
"IOMMU: allocate root entry failed\n");
2766 if (!ecap_pass_through(iommu
->ecap
))
2767 hw_pass_through
= 0;
2771 * Start from the sane iommu hardware state.
2773 for_each_active_iommu(iommu
, drhd
) {
2775 * If the queued invalidation is already initialized by us
2776 * (for example, while enabling interrupt-remapping) then
2777 * we got the things already rolling from a sane state.
2783 * Clear any previous faults.
2785 dmar_fault(-1, iommu
);
2787 * Disable queued invalidation if supported and already enabled
2788 * before OS handover.
2790 dmar_disable_qi(iommu
);
2793 for_each_active_iommu(iommu
, drhd
) {
2794 if (dmar_enable_qi(iommu
)) {
2796 * Queued Invalidate not enabled, use Register Based
2799 iommu
->flush
.flush_context
= __iommu_flush_context
;
2800 iommu
->flush
.flush_iotlb
= __iommu_flush_iotlb
;
2801 printk(KERN_INFO
"IOMMU %d 0x%Lx: using Register based "
2804 (unsigned long long)drhd
->reg_base_addr
);
2806 iommu
->flush
.flush_context
= qi_flush_context
;
2807 iommu
->flush
.flush_iotlb
= qi_flush_iotlb
;
2808 printk(KERN_INFO
"IOMMU %d 0x%Lx: using Queued "
2811 (unsigned long long)drhd
->reg_base_addr
);
2815 if (iommu_pass_through
)
2816 iommu_identity_mapping
|= IDENTMAP_ALL
;
2818 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2819 iommu_identity_mapping
|= IDENTMAP_GFX
;
2822 check_tylersburg_isoch();
2825 * If pass through is not set or not enabled, setup context entries for
2826 * identity mappings for rmrr, gfx, and isa and may fall back to static
2827 * identity mapping if iommu_identity_mapping is set.
2829 if (iommu_identity_mapping
) {
2830 ret
= iommu_prepare_static_identity_mapping(hw_pass_through
);
2832 printk(KERN_CRIT
"Failed to setup IOMMU pass-through\n");
2838 * for each dev attached to rmrr
2840 * locate drhd for dev, alloc domain for dev
2841 * allocate free domain
2842 * allocate page table entries for rmrr
2843 * if context not allocated for bus
2844 * allocate and init context
2845 * set present in root table for this bus
2846 * init context with domain, translation etc
2850 printk(KERN_INFO
"IOMMU: Setting RMRR:\n");
2851 for_each_rmrr_units(rmrr
) {
2852 /* some BIOS lists non-exist devices in DMAR table. */
2853 for_each_active_dev_scope(rmrr
->devices
, rmrr
->devices_cnt
,
2855 ret
= iommu_prepare_rmrr_dev(rmrr
, dev
);
2858 "IOMMU: mapping reserved region failed\n");
2862 iommu_prepare_isa();
2867 * global invalidate context cache
2868 * global invalidate iotlb
2869 * enable translation
2871 for_each_iommu(iommu
, drhd
) {
2872 if (drhd
->ignored
) {
2874 * we always have to disable PMRs or DMA may fail on
2878 iommu_disable_protect_mem_regions(iommu
);
2882 iommu_flush_write_buffer(iommu
);
2884 ret
= dmar_set_interrupt(iommu
);
2888 iommu_set_root_entry(iommu
);
2890 iommu
->flush
.flush_context(iommu
, 0, 0, 0, DMA_CCMD_GLOBAL_INVL
);
2891 iommu
->flush
.flush_iotlb(iommu
, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH
);
2892 iommu_enable_translation(iommu
);
2893 iommu_disable_protect_mem_regions(iommu
);
2899 for_each_active_iommu(iommu
, drhd
)
2900 free_dmar_iommu(iommu
);
2901 kfree(deferred_flush
);
/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
		       nrpages, dev_name(dev));
		return NULL;
	}

	return iova;
}
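/*
 * Caller pattern (as used by __intel_map_single() and intel_map_sg()
 * below): the byte length is first rounded up to whole VT-d pages and
 * converted to mm pages before allocation, e.g.
 *
 *	size = aligned_nrpages(paddr, size);
 *	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
 */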
static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR "Allocating domain for %s failed",
		       dev_name(dev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(dev))) {
		ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR "Domain context map for %s failed",
			       dev_name(dev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (info)
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}
static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the dev needs to go through non-identity map and unmap process.*/
static int iommu_no_mapping(struct device *dev)
{
	int found;

	if (iommu_dummy(dev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(dev);
	if (found) {
		if (iommu_should_identity_map(dev, 0))
			return 1;
		/*
		 * 32 bit DMA is removed from si_domain and fall back
		 * to non-identity mapping.
		 */
		domain_remove_one_dev_info(si_domain, dev);
		printk(KERN_INFO "32bit %s uses non-identity mapping\n",
		       dev_name(dev));
		return 0;
	} else {
		/*
		 * In case of a detached 64 bit DMA device from vm, the device
		 * is put into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(dev, 0)) {
			int ret;

			ret = domain_add_dev_info(si_domain, dev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       dev_name(dev));
				return 1;
			}
		}
	}

	return 0;
}
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(dev))
		return paddr;

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be partial page, we should map the whole
	 * page.  Note: if two part of one page are separately mapped, we
	 * might have two guest_addr mapping to the same host paddr, but this
	 * is not a big problem
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       dev_name(dev), size, (unsigned long long)paddr, dir);
	return 0;
}
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, *dev->dma_mask);
}
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
					iova->pfn_lo, iova_size(iova),
					!deferred_flush[i].freelist[j], 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
			if (deferred_flush[i].freelist[j])
				dma_free_pagelist(deferred_flush[i].freelist[j]);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].freelist[next] = freelist;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
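/*
 * Summary of the deferred-unmap machinery above: unless intel_iommu_strict
 * is set, intel_unmap() queues the IOVA and the freed page-table pages in
 * the per-IOMMU deferred_flush[] tables; the batch is drained either when
 * list_size reaches HIGH_WATER_MARK or when the 10ms unmap_timer fires, so
 * one IOTLB flush covers many unmaps.
 */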
static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
{
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;
	struct page *freelist;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(dev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 dev_name(dev), start_pfn, last_pfn);

	freelist = domain_unmap(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, !freelist, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		dma_free_pagelist(freelist);
	} else {
		add_unmap(domain, iova, freelist);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}

static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	intel_unmap(dev, dev_addr);
}
3233 static void *intel_alloc_coherent(struct device
*dev
, size_t size
,
3234 dma_addr_t
*dma_handle
, gfp_t flags
,
3235 struct dma_attrs
*attrs
)
3237 struct page
*page
= NULL
;
3240 size
= PAGE_ALIGN(size
);
3241 order
= get_order(size
);
3243 if (!iommu_no_mapping(dev
))
3244 flags
&= ~(GFP_DMA
| GFP_DMA32
);
3245 else if (dev
->coherent_dma_mask
< dma_get_required_mask(dev
)) {
3246 if (dev
->coherent_dma_mask
< DMA_BIT_MASK(32))
3252 if (flags
& __GFP_WAIT
) {
3253 unsigned int count
= size
>> PAGE_SHIFT
;
3255 page
= dma_alloc_from_contiguous(dev
, count
, order
);
3256 if (page
&& iommu_no_mapping(dev
) &&
3257 page_to_phys(page
) + size
> dev
->coherent_dma_mask
) {
3258 dma_release_from_contiguous(dev
, page
, count
);
3264 page
= alloc_pages(flags
, order
);
3267 memset(page_address(page
), 0, size
);
3269 *dma_handle
= __intel_map_single(dev
, page_to_phys(page
), size
,
3271 dev
->coherent_dma_mask
);
3273 return page_address(page
);
3274 if (!dma_release_from_contiguous(dev
, page
, size
>> PAGE_SHIFT
))
3275 __free_pages(page
, order
);
3280 static void intel_free_coherent(struct device
*dev
, size_t size
, void *vaddr
,
3281 dma_addr_t dma_handle
, struct dma_attrs
*attrs
)
3284 struct page
*page
= virt_to_page(vaddr
);
3286 size
= PAGE_ALIGN(size
);
3287 order
= get_order(size
);
3289 intel_unmap(dev
, dma_handle
);
3290 if (!dma_release_from_contiguous(dev
, page
, size
>> PAGE_SHIFT
))
3291 __free_pages(page
, order
);
3294 static void intel_unmap_sg(struct device
*dev
, struct scatterlist
*sglist
,
3295 int nelems
, enum dma_data_direction dir
,
3296 struct dma_attrs
*attrs
)
3298 intel_unmap(dev
, sglist
[0].dma_address
);
3301 static int intel_nontranslate_map_sg(struct device
*hddev
,
3302 struct scatterlist
*sglist
, int nelems
, int dir
)
3305 struct scatterlist
*sg
;
3307 for_each_sg(sglist
, sg
, nelems
, i
) {
3308 BUG_ON(!sg_page(sg
));
3309 sg
->dma_address
= page_to_phys(sg_page(sg
)) + sg
->offset
;
3310 sg
->dma_length
= sg
->length
;
3315 static int intel_map_sg(struct device
*dev
, struct scatterlist
*sglist
, int nelems
,
3316 enum dma_data_direction dir
, struct dma_attrs
*attrs
)
3319 struct dmar_domain
*domain
;
3322 struct iova
*iova
= NULL
;
3324 struct scatterlist
*sg
;
3325 unsigned long start_vpfn
;
3326 struct intel_iommu
*iommu
;
3328 BUG_ON(dir
== DMA_NONE
);
3329 if (iommu_no_mapping(dev
))
3330 return intel_nontranslate_map_sg(dev
, sglist
, nelems
, dir
);
3332 domain
= get_valid_domain_for_dev(dev
);
3336 iommu
= domain_get_iommu(domain
);
3338 for_each_sg(sglist
, sg
, nelems
, i
)
3339 size
+= aligned_nrpages(sg
->offset
, sg
->length
);
3341 iova
= intel_alloc_iova(dev
, domain
, dma_to_mm_pfn(size
),
3344 sglist
->dma_length
= 0;
3349 * Check if DMAR supports zero-length reads on write only
3352 if (dir
== DMA_TO_DEVICE
|| dir
== DMA_BIDIRECTIONAL
|| \
3353 !cap_zlr(iommu
->cap
))
3354 prot
|= DMA_PTE_READ
;
3355 if (dir
== DMA_FROM_DEVICE
|| dir
== DMA_BIDIRECTIONAL
)
3356 prot
|= DMA_PTE_WRITE
;
3358 start_vpfn
= mm_to_dma_pfn(iova
->pfn_lo
);
3360 ret
= domain_sg_mapping(domain
, start_vpfn
, sglist
, size
, prot
);
3361 if (unlikely(ret
)) {
3362 dma_pte_free_pagetable(domain
, start_vpfn
,
3363 start_vpfn
+ size
- 1);
3364 __free_iova(&domain
->iovad
, iova
);
3368 /* it's a non-present to present mapping. Only flush if caching mode */
3369 if (cap_caching_mode(iommu
->cap
))
3370 iommu_flush_iotlb_psi(iommu
, domain
->id
, start_vpfn
, size
, 0, 1);
3372 iommu_flush_write_buffer(iommu
);
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
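/*
 * A minimal sketch of how a driver reaches these callbacks through the
 * generic DMA API (device and length are hypothetical, not part of this
 * file):
 *
 *	dma_addr_t h = dma_map_page(&pdev->dev, page, 0, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, h))	// -> intel_mapping_error()
 *		return -EIO;
 *	...
 *	dma_unmap_page(&pdev->dev, h, len, DMA_TO_DEVICE);
 *
 * dma_map_page() dispatches to .map_page (intel_map_page) once
 * intel_iommu_init() has installed &intel_dma_ops as the global dma_ops.
 */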
3392 static inline int iommu_domain_cache_init(void)
3396 iommu_domain_cache
= kmem_cache_create("iommu_domain",
3397 sizeof(struct dmar_domain
),
3402 if (!iommu_domain_cache
) {
3403 printk(KERN_ERR
"Couldn't create iommu_domain cache\n");
3410 static inline int iommu_devinfo_cache_init(void)
3414 iommu_devinfo_cache
= kmem_cache_create("iommu_devinfo",
3415 sizeof(struct device_domain_info
),
3419 if (!iommu_devinfo_cache
) {
3420 printk(KERN_ERR
"Couldn't create devinfo cache\n");
3427 static inline int iommu_iova_cache_init(void)
3431 iommu_iova_cache
= kmem_cache_create("iommu_iova",
3432 sizeof(struct iova
),
3436 if (!iommu_iova_cache
) {
3437 printk(KERN_ERR
"Couldn't create iova cache\n");
3444 static int __init
iommu_init_mempool(void)
3447 ret
= iommu_iova_cache_init();
3451 ret
= iommu_domain_cache_init();
3455 ret
= iommu_devinfo_cache_init();
3459 kmem_cache_destroy(iommu_domain_cache
);
3461 kmem_cache_destroy(iommu_iova_cache
);
3466 static void __init
iommu_exit_mempool(void)
3468 kmem_cache_destroy(iommu_devinfo_cache
);
3469 kmem_cache_destroy(iommu_domain_cache
);
3470 kmem_cache_destroy(iommu_iova_cache
);
3474 static void quirk_ioat_snb_local_iommu(struct pci_dev
*pdev
)
3476 struct dmar_drhd_unit
*drhd
;
3480 /* We know that this device on this chipset has its own IOMMU.
3481 * If we find it under a different IOMMU, then the BIOS is lying
3482 * to us. Hope that the IOMMU for this device is actually
3483 * disabled, and it needs no translation...
3485 rc
= pci_bus_read_config_dword(pdev
->bus
, PCI_DEVFN(0, 0), 0xb0, &vtbar
);
3487 /* "can't" happen */
3488 dev_info(&pdev
->dev
, "failed to run vt-d quirk\n");
3491 vtbar
&= 0xffff0000;
3493 /* we know that the this iommu should be at offset 0xa000 from vtbar */
3494 drhd
= dmar_find_matched_drhd_unit(pdev
);
3495 if (WARN_TAINT_ONCE(!drhd
|| drhd
->reg_base_addr
- vtbar
!= 0xa000,
3496 TAINT_FIRMWARE_WORKAROUND
,
3497 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3498 pdev
->dev
.archdata
.iommu
= DUMMY_DEVICE_DOMAIN_INFO
;
3500 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_IOAT_SNB
, quirk_ioat_snb_local_iommu
);
3502 static void __init
init_no_remapping_devices(void)
3504 struct dmar_drhd_unit
*drhd
;
3508 for_each_drhd_unit(drhd
) {
3509 if (!drhd
->include_all
) {
3510 for_each_active_dev_scope(drhd
->devices
,
3511 drhd
->devices_cnt
, i
, dev
)
3513 /* ignore DMAR unit if no devices exist */
3514 if (i
== drhd
->devices_cnt
)
3519 for_each_active_drhd_unit(drhd
) {
3520 if (drhd
->include_all
)
3523 for_each_active_dev_scope(drhd
->devices
,
3524 drhd
->devices_cnt
, i
, dev
)
3525 if (!dev_is_pci(dev
) || !IS_GFX_DEVICE(to_pci_dev(dev
)))
3527 if (i
< drhd
->devices_cnt
)
3530 /* This IOMMU has *only* gfx devices. Either bypass it or
3531 set the gfx_mapped flag, as appropriate */
3533 intel_iommu_gfx_mapped
= 1;
3536 for_each_active_dev_scope(drhd
->devices
,
3537 drhd
->devices_cnt
, i
, dev
)
3538 dev
->archdata
.iommu
= DUMMY_DEVICE_DOMAIN_INFO
;
3543 #ifdef CONFIG_SUSPEND
3544 static int init_iommu_hw(void)
3546 struct dmar_drhd_unit
*drhd
;
3547 struct intel_iommu
*iommu
= NULL
;
3549 for_each_active_iommu(iommu
, drhd
)
3551 dmar_reenable_qi(iommu
);
3553 for_each_iommu(iommu
, drhd
) {
3554 if (drhd
->ignored
) {
3556 * we always have to disable PMRs or DMA may fail on
3560 iommu_disable_protect_mem_regions(iommu
);
3564 iommu_flush_write_buffer(iommu
);
3566 iommu_set_root_entry(iommu
);
3568 iommu
->flush
.flush_context(iommu
, 0, 0, 0,
3569 DMA_CCMD_GLOBAL_INVL
);
3570 iommu
->flush
.flush_iotlb(iommu
, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH
);
3571 iommu_enable_translation(iommu
);
3572 iommu_disable_protect_mem_regions(iommu
);
3578 static void iommu_flush_all(void)
3580 struct dmar_drhd_unit
*drhd
;
3581 struct intel_iommu
*iommu
;
3583 for_each_active_iommu(iommu
, drhd
) {
3584 iommu
->flush
.flush_context(iommu
, 0, 0, 0,
3585 DMA_CCMD_GLOBAL_INVL
);
3586 iommu
->flush
.flush_iotlb(iommu
, 0, 0, 0,
3587 DMA_TLB_GLOBAL_FLUSH
);
3591 static int iommu_suspend(void)
3593 struct dmar_drhd_unit
*drhd
;
3594 struct intel_iommu
*iommu
= NULL
;
3597 for_each_active_iommu(iommu
, drhd
) {
3598 iommu
->iommu_state
= kzalloc(sizeof(u32
) * MAX_SR_DMAR_REGS
,
3600 if (!iommu
->iommu_state
)
3606 for_each_active_iommu(iommu
, drhd
) {
3607 iommu_disable_translation(iommu
);
3609 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
3611 iommu
->iommu_state
[SR_DMAR_FECTL_REG
] =
3612 readl(iommu
->reg
+ DMAR_FECTL_REG
);
3613 iommu
->iommu_state
[SR_DMAR_FEDATA_REG
] =
3614 readl(iommu
->reg
+ DMAR_FEDATA_REG
);
3615 iommu
->iommu_state
[SR_DMAR_FEADDR_REG
] =
3616 readl(iommu
->reg
+ DMAR_FEADDR_REG
);
3617 iommu
->iommu_state
[SR_DMAR_FEUADDR_REG
] =
3618 readl(iommu
->reg
+ DMAR_FEUADDR_REG
);
3620 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
3625 for_each_active_iommu(iommu
, drhd
)
3626 kfree(iommu
->iommu_state
);
3631 static void iommu_resume(void)
3633 struct dmar_drhd_unit
*drhd
;
3634 struct intel_iommu
*iommu
= NULL
;
3637 if (init_iommu_hw()) {
3639 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3641 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3645 for_each_active_iommu(iommu
, drhd
) {
3647 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
3649 writel(iommu
->iommu_state
[SR_DMAR_FECTL_REG
],
3650 iommu
->reg
+ DMAR_FECTL_REG
);
3651 writel(iommu
->iommu_state
[SR_DMAR_FEDATA_REG
],
3652 iommu
->reg
+ DMAR_FEDATA_REG
);
3653 writel(iommu
->iommu_state
[SR_DMAR_FEADDR_REG
],
3654 iommu
->reg
+ DMAR_FEADDR_REG
);
3655 writel(iommu
->iommu_state
[SR_DMAR_FEUADDR_REG
],
3656 iommu
->reg
+ DMAR_FEUADDR_REG
);
3658 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
3661 for_each_active_iommu(iommu
, drhd
)
3662 kfree(iommu
->iommu_state
);
3665 static struct syscore_ops iommu_syscore_ops
= {
3666 .resume
= iommu_resume
,
3667 .suspend
= iommu_suspend
,
3670 static void __init
init_iommu_pm_ops(void)
3672 register_syscore_ops(&iommu_syscore_ops
);
3676 static inline void init_iommu_pm_ops(void) {}
3677 #endif /* CONFIG_PM */
3680 int __init
dmar_parse_one_rmrr(struct acpi_dmar_header
*header
, void *arg
)
3682 struct acpi_dmar_reserved_memory
*rmrr
;
3683 struct dmar_rmrr_unit
*rmrru
;
3685 rmrru
= kzalloc(sizeof(*rmrru
), GFP_KERNEL
);
3689 rmrru
->hdr
= header
;
3690 rmrr
= (struct acpi_dmar_reserved_memory
*)header
;
3691 rmrru
->base_address
= rmrr
->base_address
;
3692 rmrru
->end_address
= rmrr
->end_address
;
3693 rmrru
->devices
= dmar_alloc_dev_scope((void *)(rmrr
+ 1),
3694 ((void *)rmrr
) + rmrr
->header
.length
,
3695 &rmrru
->devices_cnt
);
3696 if (rmrru
->devices_cnt
&& rmrru
->devices
== NULL
) {
3701 list_add(&rmrru
->list
, &dmar_rmrr_units
);
3706 static struct dmar_atsr_unit
*dmar_find_atsr(struct acpi_dmar_atsr
*atsr
)
3708 struct dmar_atsr_unit
*atsru
;
3709 struct acpi_dmar_atsr
*tmp
;
3711 list_for_each_entry_rcu(atsru
, &dmar_atsr_units
, list
) {
3712 tmp
= (struct acpi_dmar_atsr
*)atsru
->hdr
;
3713 if (atsr
->segment
!= tmp
->segment
)
3715 if (atsr
->header
.length
!= tmp
->header
.length
)
3717 if (memcmp(atsr
, tmp
, atsr
->header
.length
) == 0)
3724 int dmar_parse_one_atsr(struct acpi_dmar_header
*hdr
, void *arg
)
3726 struct acpi_dmar_atsr
*atsr
;
3727 struct dmar_atsr_unit
*atsru
;
3729 if (system_state
!= SYSTEM_BOOTING
&& !intel_iommu_enabled
)
3732 atsr
= container_of(hdr
, struct acpi_dmar_atsr
, header
);
3733 atsru
= dmar_find_atsr(atsr
);
3737 atsru
= kzalloc(sizeof(*atsru
) + hdr
->length
, GFP_KERNEL
);
3742 * If memory is allocated from slab by ACPI _DSM method, we need to
3743 * copy the memory content because the memory buffer will be freed
3746 atsru
->hdr
= (void *)(atsru
+ 1);
3747 memcpy(atsru
->hdr
, hdr
, hdr
->length
);
3748 atsru
->include_all
= atsr
->flags
& 0x1;
3749 if (!atsru
->include_all
) {
3750 atsru
->devices
= dmar_alloc_dev_scope((void *)(atsr
+ 1),
3751 (void *)atsr
+ atsr
->header
.length
,
3752 &atsru
->devices_cnt
);
3753 if (atsru
->devices_cnt
&& atsru
->devices
== NULL
) {
3759 list_add_rcu(&atsru
->list
, &dmar_atsr_units
);
3764 static void intel_iommu_free_atsr(struct dmar_atsr_unit
*atsru
)
3766 dmar_free_dev_scope(&atsru
->devices
, &atsru
->devices_cnt
);
3770 int dmar_release_one_atsr(struct acpi_dmar_header
*hdr
, void *arg
)
3772 struct acpi_dmar_atsr
*atsr
;
3773 struct dmar_atsr_unit
*atsru
;
3775 atsr
= container_of(hdr
, struct acpi_dmar_atsr
, header
);
3776 atsru
= dmar_find_atsr(atsr
);
3778 list_del_rcu(&atsru
->list
);
3780 intel_iommu_free_atsr(atsru
);
3786 int dmar_check_one_atsr(struct acpi_dmar_header
*hdr
, void *arg
)
3790 struct acpi_dmar_atsr
*atsr
;
3791 struct dmar_atsr_unit
*atsru
;
3793 atsr
= container_of(hdr
, struct acpi_dmar_atsr
, header
);
3794 atsru
= dmar_find_atsr(atsr
);
3798 if (!atsru
->include_all
&& atsru
->devices
&& atsru
->devices_cnt
)
3799 for_each_active_dev_scope(atsru
->devices
, atsru
->devices_cnt
,
3806 int dmar_iommu_hotplug(struct dmar_drhd_unit
*dmaru
, bool insert
)
3808 return intel_iommu_enabled
? -ENOSYS
: 0;
3811 static void intel_iommu_free_dmars(void)
3813 struct dmar_rmrr_unit
*rmrru
, *rmrr_n
;
3814 struct dmar_atsr_unit
*atsru
, *atsr_n
;
3816 list_for_each_entry_safe(rmrru
, rmrr_n
, &dmar_rmrr_units
, list
) {
3817 list_del(&rmrru
->list
);
3818 dmar_free_dev_scope(&rmrru
->devices
, &rmrru
->devices_cnt
);
3822 list_for_each_entry_safe(atsru
, atsr_n
, &dmar_atsr_units
, list
) {
3823 list_del(&atsru
->list
);
3824 intel_iommu_free_atsr(atsru
);
3828 int dmar_find_matched_atsr_unit(struct pci_dev
*dev
)
3831 struct pci_bus
*bus
;
3832 struct pci_dev
*bridge
= NULL
;
3834 struct acpi_dmar_atsr
*atsr
;
3835 struct dmar_atsr_unit
*atsru
;
3837 dev
= pci_physfn(dev
);
3838 for (bus
= dev
->bus
; bus
; bus
= bus
->parent
) {
3840 if (!bridge
|| !pci_is_pcie(bridge
) ||
3841 pci_pcie_type(bridge
) == PCI_EXP_TYPE_PCI_BRIDGE
)
3843 if (pci_pcie_type(bridge
) == PCI_EXP_TYPE_ROOT_PORT
)
3850 list_for_each_entry_rcu(atsru
, &dmar_atsr_units
, list
) {
3851 atsr
= container_of(atsru
->hdr
, struct acpi_dmar_atsr
, header
);
3852 if (atsr
->segment
!= pci_domain_nr(dev
->bus
))
3855 for_each_dev_scope(atsru
->devices
, atsru
->devices_cnt
, i
, tmp
)
3856 if (tmp
== &bridge
->dev
)
3859 if (atsru
->include_all
)
3869 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info
*info
)
3872 struct dmar_rmrr_unit
*rmrru
;
3873 struct dmar_atsr_unit
*atsru
;
3874 struct acpi_dmar_atsr
*atsr
;
3875 struct acpi_dmar_reserved_memory
*rmrr
;
3877 if (!intel_iommu_enabled
&& system_state
!= SYSTEM_BOOTING
)
3880 list_for_each_entry(rmrru
, &dmar_rmrr_units
, list
) {
3881 rmrr
= container_of(rmrru
->hdr
,
3882 struct acpi_dmar_reserved_memory
, header
);
3883 if (info
->event
== BUS_NOTIFY_ADD_DEVICE
) {
3884 ret
= dmar_insert_dev_scope(info
, (void *)(rmrr
+ 1),
3885 ((void *)rmrr
) + rmrr
->header
.length
,
3886 rmrr
->segment
, rmrru
->devices
,
3887 rmrru
->devices_cnt
);
3890 } else if (info
->event
== BUS_NOTIFY_DEL_DEVICE
) {
3891 dmar_remove_dev_scope(info
, rmrr
->segment
,
3892 rmrru
->devices
, rmrru
->devices_cnt
);
3896 list_for_each_entry(atsru
, &dmar_atsr_units
, list
) {
3897 if (atsru
->include_all
)
3900 atsr
= container_of(atsru
->hdr
, struct acpi_dmar_atsr
, header
);
3901 if (info
->event
== BUS_NOTIFY_ADD_DEVICE
) {
3902 ret
= dmar_insert_dev_scope(info
, (void *)(atsr
+ 1),
3903 (void *)atsr
+ atsr
->header
.length
,
3904 atsr
->segment
, atsru
->devices
,
3905 atsru
->devices_cnt
);
3910 } else if (info
->event
== BUS_NOTIFY_DEL_DEVICE
) {
3911 if (dmar_remove_dev_scope(info
, atsr
->segment
,
3912 atsru
->devices
, atsru
->devices_cnt
))
3921 * Here we only respond to action of unbound device from driver.
3923 * Added device is not attached to its DMAR domain here yet. That will happen
3924 * when mapping the device to iova.
3926 static int device_notifier(struct notifier_block
*nb
,
3927 unsigned long action
, void *data
)
3929 struct device
*dev
= data
;
3930 struct dmar_domain
*domain
;
3932 if (iommu_dummy(dev
))
3935 if (action
!= BUS_NOTIFY_REMOVED_DEVICE
)
3939 * If the device is still attached to a device driver we can't
3940 * tear down the domain yet as DMA mappings may still be in use.
3941 * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
3943 if (action
== BUS_NOTIFY_DEL_DEVICE
&& dev
->driver
!= NULL
)
3946 domain
= find_domain(dev
);
3950 down_read(&dmar_global_lock
);
3951 domain_remove_one_dev_info(domain
, dev
);
3952 if (!domain_type_is_vm_or_si(domain
) && list_empty(&domain
->devices
))
3953 domain_exit(domain
);
3954 up_read(&dmar_global_lock
);
3959 static struct notifier_block device_nb
= {
3960 .notifier_call
= device_notifier
,
3963 static int intel_iommu_memory_notifier(struct notifier_block
*nb
,
3964 unsigned long val
, void *v
)
3966 struct memory_notify
*mhp
= v
;
3967 unsigned long long start
, end
;
3968 unsigned long start_vpfn
, last_vpfn
;
3971 case MEM_GOING_ONLINE
:
3972 start
= mhp
->start_pfn
<< PAGE_SHIFT
;
3973 end
= ((mhp
->start_pfn
+ mhp
->nr_pages
) << PAGE_SHIFT
) - 1;
3974 if (iommu_domain_identity_map(si_domain
, start
, end
)) {
3975 pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
3982 case MEM_CANCEL_ONLINE
:
3983 start_vpfn
= mm_to_dma_pfn(mhp
->start_pfn
);
3984 last_vpfn
= mm_to_dma_pfn(mhp
->start_pfn
+ mhp
->nr_pages
- 1);
3985 while (start_vpfn
<= last_vpfn
) {
3987 struct dmar_drhd_unit
*drhd
;
3988 struct intel_iommu
*iommu
;
3989 struct page
*freelist
;
3991 iova
= find_iova(&si_domain
->iovad
, start_vpfn
);
3993 pr_debug("dmar: failed get IOVA for PFN %lx\n",
3998 iova
= split_and_remove_iova(&si_domain
->iovad
, iova
,
3999 start_vpfn
, last_vpfn
);
4001 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
4002 start_vpfn
, last_vpfn
);
4006 freelist
= domain_unmap(si_domain
, iova
->pfn_lo
,
4010 for_each_active_iommu(iommu
, drhd
)
4011 iommu_flush_iotlb_psi(iommu
, si_domain
->id
,
4012 iova
->pfn_lo
, iova_size(iova
),
4015 dma_free_pagelist(freelist
);
4017 start_vpfn
= iova
->pfn_hi
+ 1;
4018 free_iova_mem(iova
);
4026 static struct notifier_block intel_iommu_memory_nb
= {
4027 .notifier_call
= intel_iommu_memory_notifier
,
4032 static ssize_t
intel_iommu_show_version(struct device
*dev
,
4033 struct device_attribute
*attr
,
4036 struct intel_iommu
*iommu
= dev_get_drvdata(dev
);
4037 u32 ver
= readl(iommu
->reg
+ DMAR_VER_REG
);
4038 return sprintf(buf
, "%d:%d\n",
4039 DMAR_VER_MAJOR(ver
), DMAR_VER_MINOR(ver
));
4041 static DEVICE_ATTR(version
, S_IRUGO
, intel_iommu_show_version
, NULL
);
4043 static ssize_t
intel_iommu_show_address(struct device
*dev
,
4044 struct device_attribute
*attr
,
4047 struct intel_iommu
*iommu
= dev_get_drvdata(dev
);
4048 return sprintf(buf
, "%llx\n", iommu
->reg_phys
);
4050 static DEVICE_ATTR(address
, S_IRUGO
, intel_iommu_show_address
, NULL
);
4052 static ssize_t
intel_iommu_show_cap(struct device
*dev
,
4053 struct device_attribute
*attr
,
4056 struct intel_iommu
*iommu
= dev_get_drvdata(dev
);
4057 return sprintf(buf
, "%llx\n", iommu
->cap
);
4059 static DEVICE_ATTR(cap
, S_IRUGO
, intel_iommu_show_cap
, NULL
);
4061 static ssize_t
intel_iommu_show_ecap(struct device
*dev
,
4062 struct device_attribute
*attr
,
4065 struct intel_iommu
*iommu
= dev_get_drvdata(dev
);
4066 return sprintf(buf
, "%llx\n", iommu
->ecap
);
4068 static DEVICE_ATTR(ecap
, S_IRUGO
, intel_iommu_show_ecap
, NULL
);
4070 static struct attribute
*intel_iommu_attrs
[] = {
4071 &dev_attr_version
.attr
,
4072 &dev_attr_address
.attr
,
4074 &dev_attr_ecap
.attr
,
4078 static struct attribute_group intel_iommu_group
= {
4079 .name
= "intel-iommu",
4080 .attrs
= intel_iommu_attrs
,
4083 const struct attribute_group
*intel_iommu_groups
[] = {
4088 int __init
intel_iommu_init(void)
4091 struct dmar_drhd_unit
*drhd
;
4092 struct intel_iommu
*iommu
;
4094 /* VT-d is required for a TXT/tboot launch, so enforce that */
4095 force_on
= tboot_force_iommu();
4097 if (iommu_init_mempool()) {
4099 panic("tboot: Failed to initialize iommu memory\n");
4103 down_write(&dmar_global_lock
);
4104 if (dmar_table_init()) {
4106 panic("tboot: Failed to initialize DMAR table\n");
4111 * Disable translation if already enabled prior to OS handover.
4113 for_each_active_iommu(iommu
, drhd
)
4114 if (iommu
->gcmd
& DMA_GCMD_TE
)
4115 iommu_disable_translation(iommu
);
4117 if (dmar_dev_scope_init() < 0) {
4119 panic("tboot: Failed to initialize DMAR device scope\n");
4123 if (no_iommu
|| dmar_disabled
)
4126 if (list_empty(&dmar_rmrr_units
))
4127 printk(KERN_INFO
"DMAR: No RMRR found\n");
4129 if (list_empty(&dmar_atsr_units
))
4130 printk(KERN_INFO
"DMAR: No ATSR found\n");
4132 if (dmar_init_reserved_ranges()) {
4134 panic("tboot: Failed to reserve iommu ranges\n");
4135 goto out_free_reserved_range
;
4138 init_no_remapping_devices();
4143 panic("tboot: Failed to initialize DMARs\n");
4144 printk(KERN_ERR
"IOMMU: dmar init failed\n");
4145 goto out_free_reserved_range
;
4147 up_write(&dmar_global_lock
);
4149 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
4151 init_timer(&unmap_timer
);
4152 #ifdef CONFIG_SWIOTLB
4155 dma_ops
= &intel_dma_ops
;
4157 init_iommu_pm_ops();
4159 for_each_active_iommu(iommu
, drhd
)
4160 iommu
->iommu_dev
= iommu_device_create(NULL
, iommu
,
4164 bus_set_iommu(&pci_bus_type
, &intel_iommu_ops
);
4165 bus_register_notifier(&pci_bus_type
, &device_nb
);
4166 if (si_domain
&& !hw_pass_through
)
4167 register_memory_notifier(&intel_iommu_memory_nb
);
4169 intel_iommu_enabled
= 1;
4173 out_free_reserved_range
:
4174 put_iova_domain(&reserved_iova_list
);
4176 intel_iommu_free_dmars();
4177 up_write(&dmar_global_lock
);
4178 iommu_exit_mempool();
4182 static int iommu_detach_dev_cb(struct pci_dev
*pdev
, u16 alias
, void *opaque
)
4184 struct intel_iommu
*iommu
= opaque
;
4186 iommu_detach_dev(iommu
, PCI_BUS_NUM(alias
), alias
& 0xff);
4191 * NB - intel-iommu lacks any sort of reference counting for the users of
4192 * dependent devices. If multiple endpoints have intersecting dependent
4193 * devices, unbinding the driver from any one of them will possibly leave
4194 * the others unable to operate.
4196 static void iommu_detach_dependent_devices(struct intel_iommu
*iommu
,
4199 if (!iommu
|| !dev
|| !dev_is_pci(dev
))
4202 pci_for_each_dma_alias(to_pci_dev(dev
), &iommu_detach_dev_cb
, iommu
);
4205 static void domain_remove_one_dev_info(struct dmar_domain
*domain
,
4208 struct device_domain_info
*info
, *tmp
;
4209 struct intel_iommu
*iommu
;
4210 unsigned long flags
;
4214 iommu
= device_to_iommu(dev
, &bus
, &devfn
);
4218 spin_lock_irqsave(&device_domain_lock
, flags
);
4219 list_for_each_entry_safe(info
, tmp
, &domain
->devices
, link
) {
4220 if (info
->iommu
== iommu
&& info
->bus
== bus
&&
4221 info
->devfn
== devfn
) {
4222 unlink_domain_info(info
);
4223 spin_unlock_irqrestore(&device_domain_lock
, flags
);
4225 iommu_disable_dev_iotlb(info
);
4226 iommu_detach_dev(iommu
, info
->bus
, info
->devfn
);
4227 iommu_detach_dependent_devices(iommu
, dev
);
4228 free_devinfo_mem(info
);
4230 spin_lock_irqsave(&device_domain_lock
, flags
);
4238 /* if there is no other devices under the same iommu
4239 * owned by this domain, clear this iommu in iommu_bmp
4240 * update iommu count and coherency
4242 if (info
->iommu
== iommu
)
4246 spin_unlock_irqrestore(&device_domain_lock
, flags
);
4249 domain_detach_iommu(domain
, iommu
);
4250 if (!domain_type_is_vm_or_si(domain
))
4251 iommu_detach_domain(domain
, iommu
);
4255 static int md_domain_init(struct dmar_domain
*domain
, int guest_width
)
4259 init_iova_domain(&domain
->iovad
, DMA_32BIT_PFN
);
4260 domain_reserve_special_ranges(domain
);
4262 /* calculate AGAW */
4263 domain
->gaw
= guest_width
;
4264 adjust_width
= guestwidth_to_adjustwidth(guest_width
);
4265 domain
->agaw
= width_to_agaw(adjust_width
);
4267 domain
->iommu_coherency
= 0;
4268 domain
->iommu_snooping
= 0;
4269 domain
->iommu_superpage
= 0;
4270 domain
->max_addr
= 0;
4272 /* always allocate the top pgd */
4273 domain
->pgd
= (struct dma_pte
*)alloc_pgtable_page(domain
->nid
);
4276 domain_flush_cache(domain
, domain
->pgd
, PAGE_SIZE
);
4280 static int intel_iommu_domain_init(struct iommu_domain
*domain
)
4282 struct dmar_domain
*dmar_domain
;
4284 dmar_domain
= alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE
);
4287 "intel_iommu_domain_init: dmar_domain == NULL\n");
4290 if (md_domain_init(dmar_domain
, DEFAULT_DOMAIN_ADDRESS_WIDTH
)) {
4292 "intel_iommu_domain_init() failed\n");
4293 domain_exit(dmar_domain
);
4296 domain_update_iommu_cap(dmar_domain
);
4297 domain
->priv
= dmar_domain
;
4299 domain
->geometry
.aperture_start
= 0;
4300 domain
->geometry
.aperture_end
= __DOMAIN_MAX_ADDR(dmar_domain
->gaw
);
4301 domain
->geometry
.force_aperture
= true;
4306 static void intel_iommu_domain_destroy(struct iommu_domain
*domain
)
4308 struct dmar_domain
*dmar_domain
= domain
->priv
;
4310 domain
->priv
= NULL
;
4311 domain_exit(dmar_domain
);
4314 static int intel_iommu_attach_device(struct iommu_domain
*domain
,
4317 struct dmar_domain
*dmar_domain
= domain
->priv
;
4318 struct intel_iommu
*iommu
;
4322 if (device_is_rmrr_locked(dev
)) {
4323 dev_warn(dev
, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4327 /* normally dev is not mapped */
4328 if (unlikely(domain_context_mapped(dev
))) {
4329 struct dmar_domain
*old_domain
;
4331 old_domain
= find_domain(dev
);
4333 if (domain_type_is_vm_or_si(dmar_domain
))
4334 domain_remove_one_dev_info(old_domain
, dev
);
4336 domain_remove_dev_info(old_domain
);
4340 iommu
= device_to_iommu(dev
, &bus
, &devfn
);
4344 /* check if this iommu agaw is sufficient for max mapped address */
4345 addr_width
= agaw_to_width(iommu
->agaw
);
4346 if (addr_width
> cap_mgaw(iommu
->cap
))
4347 addr_width
= cap_mgaw(iommu
->cap
);
4349 if (dmar_domain
->max_addr
> (1LL << addr_width
)) {
4350 printk(KERN_ERR
"%s: iommu width (%d) is not "
4351 "sufficient for the mapped address (%llx)\n",
4352 __func__
, addr_width
, dmar_domain
->max_addr
);
4355 dmar_domain
->gaw
= addr_width
;
4358 * Knock out extra levels of page tables if necessary
4360 while (iommu
->agaw
< dmar_domain
->agaw
) {
4361 struct dma_pte
*pte
;
4363 pte
= dmar_domain
->pgd
;
4364 if (dma_pte_present(pte
)) {
4365 dmar_domain
->pgd
= (struct dma_pte
*)
4366 phys_to_virt(dma_pte_addr(pte
));
4367 free_pgtable_page(pte
);
4369 dmar_domain
->agaw
--;
4372 return domain_add_dev_info(dmar_domain
, dev
, CONTEXT_TT_MULTI_LEVEL
);
4375 static void intel_iommu_detach_device(struct iommu_domain
*domain
,
4378 struct dmar_domain
*dmar_domain
= domain
->priv
;
4380 domain_remove_one_dev_info(dmar_domain
, dev
);
4383 static int intel_iommu_map(struct iommu_domain
*domain
,
4384 unsigned long iova
, phys_addr_t hpa
,
4385 size_t size
, int iommu_prot
)
4387 struct dmar_domain
*dmar_domain
= domain
->priv
;
4392 if (iommu_prot
& IOMMU_READ
)
4393 prot
|= DMA_PTE_READ
;
4394 if (iommu_prot
& IOMMU_WRITE
)
4395 prot
|= DMA_PTE_WRITE
;
4396 if ((iommu_prot
& IOMMU_CACHE
) && dmar_domain
->iommu_snooping
)
4397 prot
|= DMA_PTE_SNP
;
4399 max_addr
= iova
+ size
;
4400 if (dmar_domain
->max_addr
< max_addr
) {
4403 /* check if minimum agaw is sufficient for mapped address */
4404 end
= __DOMAIN_MAX_ADDR(dmar_domain
->gaw
) + 1;
4405 if (end
< max_addr
) {
4406 printk(KERN_ERR
"%s: iommu width (%d) is not "
4407 "sufficient for the mapped address (%llx)\n",
4408 __func__
, dmar_domain
->gaw
, max_addr
);
4411 dmar_domain
->max_addr
= max_addr
;
4413 /* Round up size to next multiple of PAGE_SIZE, if it and
4414 the low bits of hpa would take us onto the next page */
4415 size
= aligned_nrpages(hpa
, size
);
4416 ret
= domain_pfn_mapping(dmar_domain
, iova
>> VTD_PAGE_SHIFT
,
4417 hpa
>> VTD_PAGE_SHIFT
, size
, prot
);
4421 static size_t intel_iommu_unmap(struct iommu_domain
*domain
,
4422 unsigned long iova
, size_t size
)
4424 struct dmar_domain
*dmar_domain
= domain
->priv
;
4425 struct page
*freelist
= NULL
;
4426 struct intel_iommu
*iommu
;
4427 unsigned long start_pfn
, last_pfn
;
4428 unsigned int npages
;
4429 int iommu_id
, num
, ndomains
, level
= 0;
4431 /* Cope with horrid API which requires us to unmap more than the
4432 size argument if it happens to be a large-page mapping. */
4433 if (!pfn_to_dma_pte(dmar_domain
, iova
>> VTD_PAGE_SHIFT
, &level
))
4436 if (size
< VTD_PAGE_SIZE
<< level_to_offset_bits(level
))
4437 size
= VTD_PAGE_SIZE
<< level_to_offset_bits(level
);
4439 start_pfn
= iova
>> VTD_PAGE_SHIFT
;
4440 last_pfn
= (iova
+ size
- 1) >> VTD_PAGE_SHIFT
;
4442 freelist
= domain_unmap(dmar_domain
, start_pfn
, last_pfn
);
4444 npages
= last_pfn
- start_pfn
+ 1;
4446 for_each_set_bit(iommu_id
, dmar_domain
->iommu_bmp
, g_num_of_iommus
) {
4447 iommu
= g_iommus
[iommu_id
];
4450 * find bit position of dmar_domain
4452 ndomains
= cap_ndoms(iommu
->cap
);
4453 for_each_set_bit(num
, iommu
->domain_ids
, ndomains
) {
4454 if (iommu
->domains
[num
] == dmar_domain
)
4455 iommu_flush_iotlb_psi(iommu
, num
, start_pfn
,
4456 npages
, !freelist
, 0);
4461 dma_free_pagelist(freelist
);
4463 if (dmar_domain
->max_addr
== iova
+ size
)
4464 dmar_domain
->max_addr
= iova
;
4469 static phys_addr_t
intel_iommu_iova_to_phys(struct iommu_domain
*domain
,
4472 struct dmar_domain
*dmar_domain
= domain
->priv
;
4473 struct dma_pte
*pte
;
4477 pte
= pfn_to_dma_pte(dmar_domain
, iova
>> VTD_PAGE_SHIFT
, &level
);
4479 phys
= dma_pte_addr(pte
);
4484 static bool intel_iommu_capable(enum iommu_cap cap
)
4486 if (cap
== IOMMU_CAP_CACHE_COHERENCY
)
4487 return domain_update_iommu_snooping(NULL
) == 1;
4488 if (cap
== IOMMU_CAP_INTR_REMAP
)
4489 return irq_remapping_enabled
== 1;
4494 static int intel_iommu_add_device(struct device
*dev
)
4496 struct intel_iommu
*iommu
;
4497 struct iommu_group
*group
;
4500 iommu
= device_to_iommu(dev
, &bus
, &devfn
);
4504 iommu_device_link(iommu
->iommu_dev
, dev
);
4506 group
= iommu_group_get_for_dev(dev
);
4509 return PTR_ERR(group
);
4511 iommu_group_put(group
);
4515 static void intel_iommu_remove_device(struct device
*dev
)
4517 struct intel_iommu
*iommu
;
4520 iommu
= device_to_iommu(dev
, &bus
, &devfn
);
4524 iommu_group_remove_device(dev
);
4526 iommu_device_unlink(iommu
->iommu_dev
, dev
);
static const struct iommu_ops intel_iommu_ops = {
	.capable	= intel_iommu_capable,
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};
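/*
 * A minimal sketch (not part of this driver) of how a caller such as a
 * device-assignment framework exercises these callbacks through the
 * generic IOMMU API; device and addresses are hypothetical:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *						// -> intel_iommu_domain_init()
 *	iommu_attach_device(dom, &pdev->dev);	// -> intel_iommu_attach_device()
 *	iommu_map(dom, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *						// -> intel_iommu_map()
 *	iommu_unmap(dom, iova, SZ_4K);		// -> intel_iommu_unmap()
 *	iommu_domain_free(dom);			// -> intel_iommu_domain_destroy()
 */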
4543 static void quirk_iommu_g4x_gfx(struct pci_dev
*dev
)
4545 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4546 printk(KERN_INFO
"DMAR: Disabling IOMMU for graphics on this chipset\n");
4550 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x2a40, quirk_iommu_g4x_gfx
);
4551 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x2e00, quirk_iommu_g4x_gfx
);
4552 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x2e10, quirk_iommu_g4x_gfx
);
4553 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x2e20, quirk_iommu_g4x_gfx
);
4554 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x2e30, quirk_iommu_g4x_gfx
);
4555 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x2e40, quirk_iommu_g4x_gfx
);
4556 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x2e90, quirk_iommu_g4x_gfx
);
4558 static void quirk_iommu_rwbf(struct pci_dev
*dev
)
4561 * Mobile 4 Series Chipset neglects to set RWBF capability,
4562 * but needs it. Same seems to hold for the desktop versions.
4564 printk(KERN_INFO
"DMAR: Forcing write-buffer flush capability\n");
4568 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x2a40, quirk_iommu_rwbf
);
4569 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x2e00, quirk_iommu_rwbf
);
4570 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x2e10, quirk_iommu_rwbf
);
4571 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x2e20, quirk_iommu_rwbf
);
4572 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x2e30, quirk_iommu_rwbf
);
4573 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x2e40, quirk_iommu_rwbf
);
4574 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x2e90, quirk_iommu_rwbf
);
4577 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
4578 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4579 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
4580 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
4581 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4582 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4583 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4584 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4586 static void quirk_calpella_no_shadow_gtt(struct pci_dev
*dev
)
4590 if (pci_read_config_word(dev
, GGC
, &ggc
))
4593 if (!(ggc
& GGC_MEMORY_VT_ENABLED
)) {
4594 printk(KERN_INFO
"DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4596 } else if (dmar_map_gfx
) {
4597 /* we have to ensure the gfx device is idle before we flush */
4598 printk(KERN_INFO
"DMAR: Disabling batched IOTLB flush on Ironlake\n");
4599 intel_iommu_strict
= 1;
4602 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x0040, quirk_calpella_no_shadow_gtt
);
4603 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x0044, quirk_calpella_no_shadow_gtt
);
4604 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x0062, quirk_calpella_no_shadow_gtt
);
4605 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL
, 0x006a, quirk_calpella_no_shadow_gtt
);
4607 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4608 ISOCH DMAR unit for the Azalia sound device, but not give it any
4609 TLB entries, which causes it to deadlock. Check for that. We do
4610 this in a function called from init_dmars(), instead of in a PCI
4611 quirk, because we don't want to print the obnoxious "BIOS broken"
4612 message if VT-d is actually disabled.
4614 static void __init
check_tylersburg_isoch(void)
4616 struct pci_dev
*pdev
;
4617 uint32_t vtisochctrl
;
4619 /* If there's no Azalia in the system anyway, forget it. */
4620 pdev
= pci_get_device(PCI_VENDOR_ID_INTEL
, 0x3a3e, NULL
);
4625 /* System Management Registers. Might be hidden, in which case
4626 we can't do the sanity check. But that's OK, because the
4627 known-broken BIOSes _don't_ actually hide it, so far. */
4628 pdev
= pci_get_device(PCI_VENDOR_ID_INTEL
, 0x342e, NULL
);
4632 if (pci_read_config_dword(pdev
, 0x188, &vtisochctrl
)) {
4639 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4640 if (vtisochctrl
& 1)
4643 /* Drop all bits other than the number of TLB entries */
4644 vtisochctrl
&= 0x1c;
4646 /* If we have the recommended number of TLB entries (16), fine. */
4647 if (vtisochctrl
== 0x10)
4650 /* Zero TLB entries? You get to ride the short bus to school. */
4652 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4653 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4654 dmi_get_system_info(DMI_BIOS_VENDOR
),
4655 dmi_get_system_info(DMI_BIOS_VERSION
),
4656 dmi_get_system_info(DMI_PRODUCT_VERSION
));
4657 iommu_identity_mapping
|= IDENTMAP_AZALIA
;
4661 printk(KERN_WARNING
"DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",