/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
#define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)
/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}

static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}

static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}
static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (12 + (level - 1) * LEVEL_STRIDE);
}

static inline int address_level_offset(u64 addr, int level)
{
	return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
}

static inline u64 level_mask(int level)
{
	return ((u64)-1 << level_to_offset_bits(level));
}

static inline u64 level_size(int level)
{
	return ((u64)1 << level_to_offset_bits(level));
}

static inline u64 align_to_level(u64 addr, int level)
{
	return ((addr + level_size(level) - 1) & level_mask(level));
}
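
/*
 * Worked example (illustration only, not from the original source):
 * DEFAULT_DOMAIN_ADDRESS_WIDTH is 48, so width_to_agaw(48) =
 * (48 - 30) / 9 = 2 and agaw_to_level(2) = 4: a 48-bit guest address
 * width needs a 4-level page table.  Each level indexes LEVEL_STRIDE
 * (9) bits of the address: level 1 covers bits 12-20, level 2 covers
 * bits 21-29, and level_size(2) = 1 << 21 = 2MB per level-2 entry.
 */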
static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);

	addr &= (((u64)1) << addr_width) - 1;
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = address_level_offset(addr, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(*pte)) {
			tmp_page = alloc_pgtable_page();

			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
					flags);
				return NULL;
			}
			__iommu_flush_cache(domain->iommu, tmp_page,
					PAGE_SIZE);
			dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
			/*
			 * high level table always sets r/w, last level page
			 * table control read/write
			 */
			dma_set_pte_readable(*pte);
			dma_set_pte_writable(*pte);
			__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(*pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}
/* return address's pte at specific level */
static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
		int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = address_level_offset(addr, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(*pte))
			break;
		parent = phys_to_virt(dma_pte_addr(*pte));
		total--;
	}
	return NULL;
}
/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
{
	struct dma_pte *pte = NULL;

	/* get last level pte */
	pte = dma_addr_level_pte(domain, addr, 1);

	if (pte) {
		dma_clear_pte(*pte);
		__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
	}
}
/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;
	/* in case it's partial page */
	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;

	/* we don't need lock here, nobody else touches the iova range */
	while (start < end) {
		dma_pte_clear_one(domain, start);
		start += VTD_PAGE_SIZE;
	}
}
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
	u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	u64 tmp;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start, level);
		if (tmp >= end || (tmp + level_size(level) > end))
			return;

		while (tmp < end) {
			pte = dma_addr_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(*pte)));
				dma_clear_pte(*pte);
				__iommu_flush_cache(domain->iommu,
						pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 cmd, sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	cmd = iommu->gcmd | DMA_GCMD_SRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!cap_rwbf(iommu->cap))
		return;
	val = iommu->gcmd | DMA_GCMD_WBF;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(val, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
			readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines whether we need a write buffer flush */
static int __iommu_flush_context(struct intel_iommu *iommu,
	u16 did, u16 source_id, u8 function_mask, u64 type,
	int non_present_entry_flush)
{
	u64 val = 0;
	unsigned long flag;

	/*
	 * In the non-present entry flush case, if hardware doesn't cache
	 * non-present entries we do nothing; if hardware does cache
	 * non-present entries, we flush entries of domain 0 (the domain id
	 * used to cache any non-present entries).
	 */
	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* flushing a context entry will implicitly flush the write buffer */
	return 0;
}
/* return value determines whether we need a write buffer flush */
static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
	u64 addr, unsigned int size_order, u64 type,
	int non_present_entry_flush)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	/*
	 * In the non-present entry flush case, if hardware doesn't cache
	 * non-present entries we do nothing; if hardware does cache
	 * non-present entries, we flush entries of domain 0 (the domain id
	 * used to cache any non-present entries).
	 */
	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
	/* flushing an iotlb entry will implicitly flush the write buffer */
	return 0;
}
static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
	u64 addr, unsigned int pages, int non_present_entry_flush)
{
	unsigned int mask;

	BUG_ON(addr & (~VTD_PAGE_MASK));
	BUG_ON(pages == 0);

	/* Fallback to domain selective flush if no PSI support */
	if (!cap_pgsel_inv(iommu->cap))
		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH,
						non_present_entry_flush);

	/*
	 * PSI requires page size to be 2 ^ x, and the base address is
	 * naturally aligned to the size
	 */
	mask = ilog2(__roundup_pow_of_two(pages));
	/* Fallback to domain selective flush if size is too big */
	if (mask > cap_max_amask_val(iommu->cap))
		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
			DMA_TLB_DSI_FLUSH, non_present_entry_flush);

	return iommu->flush.flush_iotlb(iommu, did, addr, mask,
					DMA_TLB_PSI_FLUSH,
					non_present_entry_flush);
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_TES), sts);

	iommu->gcmd |= DMA_GCMD_TE;
	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}
/* iommu interrupt handling. Most of it is MSI-like. */

static const char *fault_reason_strings[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};
#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason)
{
	if (fault_reason > MAX_FAULT_REASON_IDX)
		return "Unknown";
	else
		return fault_reason_strings[fault_reason];
}
void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;

	reason = dmar_get_fault_reason(fault_reason);

	printk(KERN_ERR
		"DMAR:[%s] Request device [%02x:%02x.%d] "
		"fault addr %llx\n"
		"DMAR:[fault reason %02d] %s\n",
		(type ? "DMA Read" : "DMA Write"),
		(source_id >> 8), PCI_SLOT(source_id & 0xFF),
		PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
static irqreturn_t iommu_page_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_overflow;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		iommu_page_fault_do_one(iommu, type, fault_reason,
				source_id, guest_addr);

		fault_index++;
		if (fault_index > cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_overflow:
	/* clear primary fault overflow */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status & DMA_FSTS_PFO)
		writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return 0;
	}

	/* Force fault register is cleared */
	iommu_page_fault(irq, iommu);

	ret = request_irq(irq, iommu_page_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
static void domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);
		domain_exit(domain);
		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	/* free context mapping */
	free_context_table(iommu);
}
static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
{
	unsigned long num;
	unsigned long ndomains;
	struct dmar_domain *domain;
	unsigned long flags;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);
	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_domain_mem(domain);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return NULL;
	}

	set_bit(num, iommu->domain_ids);
	domain->id = num;
	domain->iommu = iommu;
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return domain;
}
static void iommu_free_domain(struct dmar_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&domain->iommu->lock, flags);
	clear_bit(domain->id, domain->iommu->domain_ids);
	spin_unlock_irqrestore(&domain->iommu->lock, flags);
}
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;
	u64 addr, size;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			addr = r->start;
			addr &= PAGE_MASK;
			size = r->end - addr;
			size = PAGE_ALIGN(size);
			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
				IOVA_PFN(size + addr) - 1);
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}
static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
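
/*
 * Worked example (illustration only, not from the original source):
 * gaw = 39 gives r = (39 - 12) % 9 = 0, so 39 bits is already a width
 * the page-table layout can express; gaw = 40 gives r = 1 and is
 * rounded up to 40 + 9 - 1 = 48, the next width reachable by whole
 * 9-bit levels above the 12-bit page offset.
 */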
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain->iommu;
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
static void domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);
	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_domain(domain);
	free_domain_mem(domain);
}
static int domain_context_mapping_one(struct dmar_domain *domain,
		u8 bus, u8 devfn)
{
	struct context_entry *context;
	struct intel_iommu *iommu = domain->iommu;
	unsigned long flags;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	BUG_ON(!domain->pgd);
	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(*context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	context_set_domain_id(*context, domain->id);
	context_set_address_width(*context, domain->agaw);
	context_set_address_root(*context, virt_to_phys(domain->pgd));
	context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
	context_set_fault_enable(*context);
	context_set_present(*context);
	__iommu_flush_cache(iommu, context, sizeof(*context));

	/* it's a non-present to present mapping */
	if (iommu->flush.flush_context(iommu, domain->id,
		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
		DMA_CCMD_DEVICE_INVL, 1))
		iommu_flush_write_buffer(iommu);
	else
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
	return 0;
}
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pdev->bus->number,
		pdev->devfn);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain, parent->bus->number,
			parent->devfn);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
			tmp->subordinate->number, 0);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
			tmp->bus->number, tmp->devfn);
}
static int domain_context_mapped(struct dmar_domain *domain,
	struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = device_context_mapped(domain->iommu,
		pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(domain->iommu, parent->bus->number,
			parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(domain->iommu,
			tmp->subordinate->number, 0);
	else
		return device_context_mapped(domain->iommu,
			tmp->bus->number, tmp->devfn);
}
static int
domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
			u64 hpa, size_t size, int prot)
{
	u64 start_pfn, end_pfn;
	struct dma_pte *pte;
	int index;
	int addr_width = agaw_to_width(domain->agaw);

	hpa &= (((u64)1) << addr_width) - 1;

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;
	iova &= PAGE_MASK;
	start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
	end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
	index = 0;
	while (start_pfn < end_pfn) {
		pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
		if (!pte)
			return -ENOMEM;
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		BUG_ON(dma_pte_addr(*pte));
		dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT);
		dma_set_pte_prot(*pte, prot);
		__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
		start_pfn++;
		index++;
	}
	return 0;
}
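
/*
 * Worked example (illustration only, not from the original source):
 * mapping hpa 0x2345800 with size 0x1000 yields start_pfn = 0x2345 and
 * end_pfn = 0x2347, so two 4KB PTEs are written even though only
 * 0x1000 bytes were requested -- the range straddles a page boundary,
 * and the whole of each touched page must be mapped.
 */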
static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
{
	clear_context_table(domain->iommu, bus, devfn);
	domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL, 0);
	domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH, 0);
}
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		detach_domain_for_dev(info->domain, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the domain info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (dev_tmp->is_pcie) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		return NULL;
	}
	iommu = drhd->iommu;

	domain = iommu_alloc_domain(iommu);
	if (!domain)
		goto error;

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}
static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	unsigned long size;
	unsigned long long base;
	int ret;

	printk(KERN_INFO
		"IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
		pci_name(pdev), start, end);
	/* page table init */
	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* The address might not be aligned */
	base = start & PAGE_MASK;
	size = end - base;
	size = PAGE_ALIGN(size);
	if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
			IOVA_PFN(base + size) - 1)) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		ret = -ENOMEM;
		goto error;
	}

	pr_debug("Mapping reserved region %lx@%llx for %s\n",
		size, base, pci_name(pdev));
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, base, base + size);

	ret = domain_page_mapping(domain, base, base, size,
		DMA_PTE_READ|DMA_PTE_WRITE);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev);
	if (!ret)
		return 0;
error:
	domain_exit(domain);
	return ret;
}
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
	struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
		rmrr->end_address + 1);
}
#ifdef CONFIG_DMAR_GFX_WA
struct iommu_prepare_data {
	struct pci_dev *pdev;
	int ret;
};

static int __init iommu_prepare_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	struct iommu_prepare_data *data;

	data = (struct iommu_prepare_data *)datax;

	data->ret = iommu_prepare_identity_map(data->pdev,
			start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	return data->ret;
}

static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
{
	int nid;
	struct iommu_prepare_data data;

	data.pdev = pdev;
	data.ret = 0;

	for_each_online_node(nid) {
		work_with_active_regions(nid, iommu_prepare_work_fn, &data);
		if (data.ret)
			return data.ret;
	}
	return data.ret;
}

static void __init iommu_prepare_gfx_mapping(void)
{
	struct pci_dev *pdev = NULL;
	int ret;

	for_each_pci_dev(pdev) {
		if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
				!IS_GFX_DEVICE(pdev))
			continue;
		printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
			pci_name(pdev));
		ret = iommu_prepare_with_active_regions(pdev);
		if (ret)
			printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
	}
}
#endif /* CONFIG_DMAR_GFX_WA */
#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16M identity map, "
			"floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */
static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret, unit = 0;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path; all other access is read
		 * only
		 */
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
	 */
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/* some BIOSes list non-existent devices in the DMAR table */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				 "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_gfx_mapping();

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		sprintf(iommu->name, "dmar%d", unit++);

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
					   0);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
					 0);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	return ret;
}
static inline u64 aligned_size(u64 host_addr, size_t size)
{
	u64 addr;
	addr = (host_addr & (~PAGE_MASK)) + size;
	return PAGE_ALIGN(addr);
}
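
/*
 * Worked example (illustration only, not from the original source):
 * with 4KB pages, a 0x1800-byte buffer starting at host address
 * 0x12345E00 spans (0xE00 + 0x1800) = 0x2600 bytes measured from its
 * page base, which PAGE_ALIGN rounds up to 0x3000 -- three full pages
 * must be mapped even though the buffer itself is under two pages.
 */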
static struct iova *
iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
{
	struct iova *piova;

	/* Make sure it's in range */
	end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
	if (!size || (IOVA_START_ADDR + size > end))
		return NULL;

	piova = alloc_iova(&domain->iovad,
			size >> PAGE_SHIFT, IOVA_PFN(end), 1);
	return piova;
}
static struct iova *
__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
		   size_t size, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
		iova = iommu_alloc_iova(domain, size, dma_mask);
	else {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_32BIT_MASK and if that fails then try allocating
		 * from higher range
		 */
		iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
		if (!iova)
			iova = iommu_alloc_iova(domain, size, dma_mask);
	}

	if (!iova) {
		printk(KERN_ERR "Allocating iova for %s failed",
			pci_name(pdev));
		return NULL;
	}

	return iova;
}
static struct dmar_domain *
get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
			"Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(domain, pdev))) {
		ret = domain_context_mapping(domain, pdev);
		if (ret) {
			printk(KERN_ERR
				"Domain context map for %s failed",
				pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}
__intel_map_single(struct device
*hwdev
, phys_addr_t paddr
,
1857 size_t size
, int dir
, u64 dma_mask
)
1859 struct pci_dev
*pdev
= to_pci_dev(hwdev
);
1860 struct dmar_domain
*domain
;
1861 phys_addr_t start_paddr
;
1866 BUG_ON(dir
== DMA_NONE
);
1867 if (pdev
->dev
.archdata
.iommu
== DUMMY_DEVICE_DOMAIN_INFO
)
1870 domain
= get_valid_domain_for_dev(pdev
);
1874 size
= aligned_size((u64
)paddr
, size
);
1876 iova
= __intel_alloc_iova(hwdev
, domain
, size
, pdev
->dma_mask
);
1880 start_paddr
= (phys_addr_t
)iova
->pfn_lo
<< PAGE_SHIFT
;
1883 * Check if DMAR supports zero-length reads on write only
1886 if (dir
== DMA_TO_DEVICE
|| dir
== DMA_BIDIRECTIONAL
|| \
1887 !cap_zlr(domain
->iommu
->cap
))
1888 prot
|= DMA_PTE_READ
;
1889 if (dir
== DMA_FROM_DEVICE
|| dir
== DMA_BIDIRECTIONAL
)
1890 prot
|= DMA_PTE_WRITE
;
1892 * paddr - (paddr + size) might be partial page, we should map the whole
1893 * page. Note: if two part of one page are separately mapped, we
1894 * might have two guest_addr mapping to the same host paddr, but this
1895 * is not a big problem
1897 ret
= domain_page_mapping(domain
, start_paddr
,
1898 ((u64
)paddr
) & PAGE_MASK
, size
, prot
);
1902 /* it's a non-present to present mapping */
1903 ret
= iommu_flush_iotlb_psi(domain
->iommu
, domain
->id
,
1904 start_paddr
, size
>> VTD_PAGE_SHIFT
, 1);
1906 iommu_flush_write_buffer(domain
->iommu
);
1908 return start_paddr
+ ((u64
)paddr
& (~PAGE_MASK
));
1912 __free_iova(&domain
->iovad
, iova
);
1913 printk(KERN_ERR
"Device %s request: %lx@%llx dir %d --- failed\n",
1914 pci_name(pdev
), size
, (unsigned long long)paddr
, dir
);
dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
			    size_t size, int dir)
{
	return __intel_map_single(hwdev, paddr, size, dir,
				  to_pci_dev(hwdev)->dma_mask);
}
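
/*
 * Illustrative usage (hypothetical driver code, not part of this file;
 * callers normally go through the generic DMA API, which dispatches
 * here via intel_dma_ops):
 *
 *	dma_addr_t handle = intel_map_single(&pdev->dev,
 *			virt_to_phys(buf), len, DMA_TO_DEVICE);
 *	// ...program the device to DMA from "handle"...
 *	intel_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 */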
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (deferred_flush[i].next) {
			struct intel_iommu *iommu =
				deferred_flush[i].domain[0]->iommu;

			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
						 DMA_TLB_GLOBAL_FLUSH, 0);
			for (j = 0; j < deferred_flush[i].next; j++) {
				__free_iova(&deferred_flush[i].domain[j]->iovad,
						deferred_flush[i].iova[j]);
			}
			deferred_flush[i].next = 0;
		}
	}

	list_size = 0;
}
static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu_id = dom->iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
			int dir)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_addr;
	struct iova *iova;

	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return;
	domain = find_domain(pdev);
	BUG_ON(!domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (!iova)
		return;

	start_addr = iova->pfn_lo << PAGE_SHIFT;
	size = aligned_size((u64)dev_addr, size);

	pr_debug("Device %s unmapping: %lx@%llx\n",
		pci_name(pdev), size, (unsigned long long)start_addr);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_addr, start_addr + size);
	/* free page tables */
	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
	if (intel_iommu_strict) {
		if (iommu_flush_iotlb_psi(domain->iommu,
			domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
			iommu_flush_write_buffer(domain->iommu);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}
void *intel_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}
void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			 dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, order);
}
#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))

void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
		    int nelems, int dir)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_addr;
	struct iova *iova;
	size_t size = 0;
	void *addr;
	struct scatterlist *sg;

	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return;

	domain = find_domain(pdev);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (!iova)
		return;
	for_each_sg(sglist, sg, nelems, i) {
		addr = SG_ENT_VIRT_ADDRESS(sg);
		size += aligned_size((u64)addr, sg->length);
	}

	start_addr = iova->pfn_lo << PAGE_SHIFT;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_addr, start_addr + size);
	/* free page tables */
	dma_pte_free_pagetable(domain, start_addr, start_addr + size);

	if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
			size >> VTD_PAGE_SHIFT, 0))
		iommu_flush_write_buffer(domain->iommu);

	/* free iova */
	__free_iova(&domain->iovad, iova);
}
static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
		sg->dma_length = sg->length;
	}
	return nelems;
}
*hwdev
, struct scatterlist
*sglist
, int nelems
,
2122 struct pci_dev
*pdev
= to_pci_dev(hwdev
);
2123 struct dmar_domain
*domain
;
2127 struct iova
*iova
= NULL
;
2129 struct scatterlist
*sg
;
2130 unsigned long start_addr
;
2132 BUG_ON(dir
== DMA_NONE
);
2133 if (pdev
->dev
.archdata
.iommu
== DUMMY_DEVICE_DOMAIN_INFO
)
2134 return intel_nontranslate_map_sg(hwdev
, sglist
, nelems
, dir
);
2136 domain
= get_valid_domain_for_dev(pdev
);
2140 for_each_sg(sglist
, sg
, nelems
, i
) {
2141 addr
= SG_ENT_VIRT_ADDRESS(sg
);
2142 addr
= (void *)virt_to_phys(addr
);
2143 size
+= aligned_size((u64
)addr
, sg
->length
);
2146 iova
= __intel_alloc_iova(hwdev
, domain
, size
, pdev
->dma_mask
);
2148 sglist
->dma_length
= 0;
2153 * Check if DMAR supports zero-length reads on write only
2156 if (dir
== DMA_TO_DEVICE
|| dir
== DMA_BIDIRECTIONAL
|| \
2157 !cap_zlr(domain
->iommu
->cap
))
2158 prot
|= DMA_PTE_READ
;
2159 if (dir
== DMA_FROM_DEVICE
|| dir
== DMA_BIDIRECTIONAL
)
2160 prot
|= DMA_PTE_WRITE
;
2162 start_addr
= iova
->pfn_lo
<< PAGE_SHIFT
;
2164 for_each_sg(sglist
, sg
, nelems
, i
) {
2165 addr
= SG_ENT_VIRT_ADDRESS(sg
);
2166 addr
= (void *)virt_to_phys(addr
);
2167 size
= aligned_size((u64
)addr
, sg
->length
);
2168 ret
= domain_page_mapping(domain
, start_addr
+ offset
,
2169 ((u64
)addr
) & PAGE_MASK
,
2172 /* clear the page */
2173 dma_pte_clear_range(domain
, start_addr
,
2174 start_addr
+ offset
);
2175 /* free page tables */
2176 dma_pte_free_pagetable(domain
, start_addr
,
2177 start_addr
+ offset
);
2179 __free_iova(&domain
->iovad
, iova
);
2182 sg
->dma_address
= start_addr
+ offset
+
2183 ((u64
)addr
& (~PAGE_MASK
));
2184 sg
->dma_length
= sg
->length
;
2188 /* it's a non-present to present mapping */
2189 if (iommu_flush_iotlb_psi(domain
->iommu
, domain
->id
,
2190 start_addr
, offset
>> VTD_PAGE_SHIFT
, 1))
2191 iommu_flush_write_buffer(domain
->iommu
);
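
/*
 * Illustrative usage (hypothetical driver code, not part of this file):
 * all scatterlist entries share one contiguous IOVA allocation, and on
 * success each entry carries its dma_address/dma_length:
 *
 *	int n = intel_map_sg(&pdev->dev, sglist, nelems, DMA_FROM_DEVICE);
 *	if (n)
 *		// ...program the device with sg_dma_address()/sg_dma_len()...
 *	intel_unmap_sg(&pdev->dev, sglist, nelems, DMA_FROM_DEVICE);
 */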
static struct dma_mapping_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_single = intel_map_single,
	.unmap_single = intel_unmap_single,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
};
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					 sizeof(struct dmar_domain),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					 sizeof(struct device_domain_info),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					 sizeof(struct iova),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
				!IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || swiotlb || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;
	dma_ops = &intel_dma_ops;
	return 0;
}
void intel_iommu_domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~VTD_PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_domain(domain);
	free_domain_mem(domain);
}
EXPORT_SYMBOL_GPL(intel_iommu_domain_exit);
struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_domain *domain;
	struct intel_iommu *iommu;

	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n");
		return NULL;
	}

	iommu = drhd->iommu;
	if (!iommu) {
		printk(KERN_ERR
			"intel_iommu_domain_alloc: iommu == NULL\n");
		return NULL;
	}
	domain = iommu_alloc_domain(iommu);
	if (!domain) {
		printk(KERN_ERR
			"intel_iommu_domain_alloc: domain == NULL\n");
		return NULL;
	}
	if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_alloc: domain_init() failed\n");
		intel_iommu_domain_exit(domain);
		return NULL;
	}
	return domain;
}
EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc);
int intel_iommu_context_mapping(
	struct dmar_domain *domain, struct pci_dev *pdev)
{
	int rc;
	rc = domain_context_mapping(domain, pdev);
	return rc;
}
EXPORT_SYMBOL_GPL(intel_iommu_context_mapping);
int intel_iommu_page_mapping(
	struct dmar_domain *domain, dma_addr_t iova,
	u64 hpa, size_t size, int prot)
{
	int rc;
	rc = domain_page_mapping(domain, iova, hpa, size, prot);
	return rc;
}
EXPORT_SYMBOL_GPL(intel_iommu_page_mapping);
void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
{
	detach_domain_for_dev(domain, bus, devfn);
}
EXPORT_SYMBOL_GPL(intel_iommu_detach_dev);
struct dmar_domain *
intel_iommu_find_domain(struct pci_dev *pdev)
{
	return find_domain(pdev);
}
EXPORT_SYMBOL_GPL(intel_iommu_find_domain);
int intel_iommu_found(void)
{
	return g_num_of_iommus;
}
EXPORT_SYMBOL_GPL(intel_iommu_found);
u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
{
	struct dma_pte *pte;
	u64 pfn;

	pfn = 0;
	pte = addr_to_dma_pte(domain, iova);

	if (pte)
		pfn = dma_pte_addr(*pte);

	return pfn >> VTD_PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
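
/*
 * Illustrative usage of the exported API (hypothetical caller such as a
 * virtualization layer; not part of this file):
 *
 *	struct dmar_domain *dom = intel_iommu_domain_alloc(pdev);
 *	if (dom && !intel_iommu_context_mapping(dom, pdev))
 *		intel_iommu_page_mapping(dom, iova, hpa, PAGE_SIZE,
 *					 DMA_PTE_READ | DMA_PTE_WRITE);
 *	// ...device DMA is now translated through "dom"...
 *	intel_iommu_detach_dev(dom, bus, devfn);
 *	intel_iommu_domain_exit(dom);
 */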