/*
 * VFIO: IOMMU DMA mapping support for Type1 IOMMU
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 *
 * We arbitrarily define a Type1 IOMMU as one matching the below code.
 * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
 * VT-d, but that makes it harder to re-use as theoretically anyone
 * implementing a similar IOMMU could make use of this.  We expect the
 * IOMMU to support the IOMMU API and have few to no restrictions around
 * the IOVA range that can be mapped.  The Type1 IOMMU is currently
 * optimized for relatively static mappings of a userspace process with
 * userspace pages pinned into memory.  We also assume devices and IOMMU
 * domains are PCI based as the IOMMU API is still centered around a
 * device/bus interface rather than a group interface.
 */
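/*
 * Illustrative only, not part of this driver: a minimal userspace
 * sequence for driving the Type1 backend through a VFIO container,
 * assuming a group already bound to vfio-pci and visible as
 * /dev/vfio/26 (the group number is just an example) and a page-aligned
 * user buffer "buf".  Error handling and the device-level ioctls are
 * omitted.
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	int group = open("/dev/vfio/26", O_RDWR);
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)buf,
 *		.iova  = 0,
 *		.size  = 1024 * 1024,
 *	};
 *
 *	if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
 *		exit(1);
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 */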
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>		/* pci_bus_type */
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>
#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "Type1 IOMMU driver for VFIO"
static bool allow_unsafe_interrupts;
module_param_named(allow_unsafe_interrupts,
		   allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_interrupts,
		 "Enable VFIO IOMMU support on platforms without interrupt remapping support.");
static bool disable_hugepages;
module_param_named(disable_hugepages,
		   disable_hugepages, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_hugepages,
		 "Disable VFIO IOMMU support for IOMMU hugepages.");
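/*
 * Both parameters above are registered with S_IWUSR, so they can also be
 * changed at runtime through sysfs.  For example (illustrative only):
 *
 *	modprobe vfio_iommu_type1 allow_unsafe_interrupts=1
 *	echo 1 > /sys/module/vfio_iommu_type1/parameters/disable_hugepages
 */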
struct vfio_iommu {
	struct iommu_domain	*domain;
	struct mutex		lock;
	struct rb_root		dma_list;
	struct list_head	group_list;
	bool			cache;
};

struct vfio_dma {
	struct rb_node		node;
	dma_addr_t		iova;		/* Device address */
	unsigned long		vaddr;		/* Process virtual addr */
	size_t			size;		/* Map size (bytes) */
	int			prot;		/* IOMMU_READ/WRITE */
};

struct vfio_group {
	struct iommu_group	*iommu_group;
	struct list_head	next;
};
/*
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */
static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
				      dma_addr_t start, size_t size)
{
	struct rb_node *node = iommu->dma_list.rb_node;

	while (node) {
		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);

		if (start + size <= dma->iova)
			node = node->rb_left;
		else if (start >= dma->iova + dma->size)
			node = node->rb_right;
		else
			return dma;
	}

	return NULL;
}
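/*
 * For example, with a single vfio_dma covering iova 0x100000 with size
 * 0x200000, vfio_find_dma(iommu, 0x180000, 0x1000) returns that entry
 * because the ranges overlap, while vfio_find_dma(iommu, 0x300000, 0x1000)
 * returns NULL since that range starts at the first byte past the entry.
 */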
static void vfio_insert_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
{
	struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
	struct vfio_dma *dma;

	while (*link) {
		parent = *link;
		dma = rb_entry(parent, struct vfio_dma, node);

		if (new->iova + new->size <= dma->iova)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &iommu->dma_list);
}
static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
{
	rb_erase(&old->node, &iommu->dma_list);
}
struct vwork {
	struct mm_struct	*mm;
	long			npage;
	struct work_struct	work;
};
/* delayed decrement/increment for locked_vm */
static void vfio_lock_acct_bg(struct work_struct *work)
{
	struct vwork *vwork = container_of(work, struct vwork, work);
	struct mm_struct *mm;

	mm = vwork->mm;
	down_write(&mm->mmap_sem);
	mm->locked_vm += vwork->npage;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(vwork);
}
static void vfio_lock_acct(long npage)
{
	struct vwork *vwork;
	struct mm_struct *mm;

	if (!current->mm || !npage)
		return; /* process exited or nothing to do */

	if (down_write_trylock(&current->mm->mmap_sem)) {
		current->mm->locked_vm += npage;
		up_write(&current->mm->mmap_sem);
		return;
	}

	/*
	 * Couldn't get mmap_sem lock, so must setup to update
	 * mm->locked_vm later.  If locked_vm were atomic, we
	 * wouldn't need this silliness.
	 */
	vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
	if (!vwork)
		return;
	mm = get_task_mm(current);
	if (!mm) {
		kfree(vwork);
		return;
	}
	INIT_WORK(&vwork->work, vfio_lock_acct_bg);
	vwork->mm = mm;
	vwork->npage = npage;
	schedule_work(&vwork->work);
}
/*
 * Some mappings aren't backed by a struct page, for example an mmap'd
 * MMIO range for our own or another device.  These use a different
 * pfn conversion and shouldn't be tracked as locked pages.
 */
static bool is_invalid_reserved_pfn(unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		bool reserved;
		struct page *tail = pfn_to_page(pfn);
		struct page *head = compound_trans_head(tail);
		reserved = !!(PageReserved(head));
		if (head != tail) {
			/*
			 * "head" is not a dangling pointer
			 * (compound_trans_head takes care of that)
			 * but the hugepage may have been split
			 * from under us (and we may not hold a
			 * reference count on the head page so it can
			 * be reused before we run PageReferenced), so
			 * we've to check PageTail before returning
			 * what we just read.
			 */
			smp_rmb();
			if (PageTail(tail))
				return reserved;
		}
		return PageReserved(tail);
	}

	return true;
}
static int put_pfn(unsigned long pfn, int prot)
{
	if (!is_invalid_reserved_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (prot & IOMMU_WRITE)
			SetPageDirty(page);
		put_page(page);
		return 1;
	}
	return 0;
}
static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
{
	struct page *page[1];
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	if (get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), page) == 1) {
		*pfn = page_to_pfn(page[0]);
		return 0;
	}

	down_read(&current->mm->mmap_sem);

	vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);

	if (vma && vma->vm_flags & VM_PFNMAP) {
		*pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		if (is_invalid_reserved_pfn(*pfn))
			ret = 0;
	}

	up_read(&current->mm->mmap_sem);

	return ret;
}
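/*
 * Note: the VM_PFNMAP path above covers user addresses with no struct page
 * backing, e.g. a vaddr obtained by mmap()ing another device's MMIO BAR.
 * Such pfns are returned but never reference counted or charged against
 * the mlock limit.
 */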
/*
 * Attempt to pin pages.  We really don't want to track all the pfns and
 * the iommu can only map chunks of consecutive pfns anyway, so get the
 * first page and all consecutive pages with the same locking.
 */
static long vfio_pin_pages(unsigned long vaddr, long npage,
			   int prot, unsigned long *pfn_base)
{
	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	bool lock_cap = capable(CAP_IPC_LOCK);
	long ret, i;

	if (!current->mm)
		return -ENODEV;

	ret = vaddr_get_pfn(vaddr, prot, pfn_base);
	if (ret)
		return ret;

	if (is_invalid_reserved_pfn(*pfn_base))
		return 1;

	if (!lock_cap && current->mm->locked_vm + 1 > limit) {
		put_pfn(*pfn_base, prot);
		pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
			limit << PAGE_SHIFT);
		return -ENOMEM;
	}

	if (unlikely(disable_hugepages)) {
		vfio_lock_acct(1);
		return 1;
	}

	/* Lock all the consecutive pages from pfn_base */
	for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
		unsigned long pfn = 0;

		ret = vaddr_get_pfn(vaddr, prot, &pfn);
		if (ret)
			break;

		if (pfn != *pfn_base + i || is_invalid_reserved_pfn(pfn)) {
			put_pfn(pfn, prot);
			break;
		}

		if (!lock_cap && current->mm->locked_vm + i + 1 > limit) {
			put_pfn(pfn, prot);
			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
				__func__, limit << PAGE_SHIFT);
			break;
		}
	}

	vfio_lock_acct(i);

	return i;
}
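/*
 * For example, a vaddr backed by an anonymous transparent hugepage can
 * return up to 512 consecutive pfns (2MB on x86) from a single call,
 * letting the caller hand the IOMMU one large mapping instead of 512
 * PAGE_SIZE mappings.  With disable_hugepages set, the function always
 * returns after the first page.
 */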
static long vfio_unpin_pages(unsigned long pfn, long npage,
			     int prot, bool do_accounting)
{
	unsigned long unlocked = 0;
	long i;

	for (i = 0; i < npage; i++)
		unlocked += put_pfn(pfn++, prot);

	if (do_accounting)
		vfio_lock_acct(-unlocked);

	return unlocked;
}
static int vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
			    dma_addr_t iova, size_t *size)
{
	dma_addr_t start = iova, end = iova + *size;
	long unlocked = 0;

	while (iova < end) {
		size_t unmapped;
		phys_addr_t phys;

		/*
		 * We use the IOMMU to track the physical address.  This
		 * saves us from having a lot more entries in our mapping
		 * tree.  The downside is that we don't track the size
		 * used to do the mapping.  We request unmap of a single
		 * page, but expect IOMMUs that support large pages to
		 * unmap a larger chunk.
		 */
		phys = iommu_iova_to_phys(iommu->domain, iova);
		if (WARN_ON(!phys)) {
			iova += PAGE_SIZE;
			continue;
		}

		unmapped = iommu_unmap(iommu->domain, iova, PAGE_SIZE);
		if (!unmapped)
			break;

		unlocked += vfio_unpin_pages(phys >> PAGE_SHIFT,
					     unmapped >> PAGE_SHIFT,
					     dma->prot, false);
		iova += unmapped;
	}

	vfio_lock_acct(-unlocked);

	*size = iova - start;

	return 0;
}
static int vfio_remove_dma_overlap(struct vfio_iommu *iommu, dma_addr_t start,
				   size_t *size, struct vfio_dma *dma)
{
	size_t offset, overlap, tmp;
	struct vfio_dma *split;
	int ret;

	/*
	 * Existing dma region is completely covered, unmap all.  This is
	 * the likely case since userspace tends to map and unmap buffers
	 * in one shot rather than multiple mappings within a buffer.
	 */
	if (likely(start <= dma->iova &&
		   start + *size >= dma->iova + dma->size)) {
		*size = dma->size;
		ret = vfio_unmap_unpin(iommu, dma, dma->iova, size);
		if (ret)
			return ret;

		/*
		 * Did we remove more than we have?  Should never happen
		 * since a vfio_dma is contiguous in iova and vaddr.
		 */
		WARN_ON(*size != dma->size);

		vfio_remove_dma(iommu, dma);
		kfree(dma);
		return 0;
	}

	/* Overlap low address of existing range */
	if (start <= dma->iova) {
		overlap = start + *size - dma->iova;
		ret = vfio_unmap_unpin(iommu, dma, dma->iova, &overlap);
		if (ret)
			return ret;

		vfio_remove_dma(iommu, dma);

		/*
		 * Check if we removed the whole vfio_dma.  If not,
		 * fix up and re-insert.
		 */
		if (overlap < dma->size) {
			dma->iova += overlap;
			dma->vaddr += overlap;
			dma->size -= overlap;
			vfio_insert_dma(iommu, dma);
		} else
			kfree(dma);

		*size = overlap;
		return 0;
	}

	/* Overlap high address of existing range */
	if (start + *size >= dma->iova + dma->size) {
		offset = start - dma->iova;
		overlap = dma->size - offset;

		ret = vfio_unmap_unpin(iommu, dma, start, &overlap);
		if (ret)
			return ret;

		/*
		 * We may have unmapped the entire vfio_dma if the user is
		 * trying to unmap a sub-region of what was originally
		 * mapped.  If anything left, we can resize in place since
		 * iova and vaddr remain the same.
		 */
		if (overlap < dma->size) {
			dma->size -= overlap;
		} else {
			vfio_remove_dma(iommu, dma);
			kfree(dma);
		}

		*size = overlap;
		return 0;
	}

	/* Split existing */
	offset = start - dma->iova;

	ret = vfio_unmap_unpin(iommu, dma, start, size);
	if (ret)
		return ret;

	WARN_ON(!*size);
	tmp = dma->size;

	/*
	 * Resize the lower vfio_dma in place, insert new for remaining
	 * upper segment.
	 */
	dma->size = offset;

	if (offset + *size < tmp) {
		split = kzalloc(sizeof(*split), GFP_KERNEL);
		if (!split)
			return -ENOMEM;

		split->size = tmp - offset - *size;
		split->iova = dma->iova + offset + *size;
		split->vaddr = dma->vaddr + offset + *size;
		split->prot = dma->prot;
		vfio_insert_dma(iommu, split);
	}

	return 0;
}
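/*
 * Example of the split case above: with an existing vfio_dma covering
 * iova 0x0-0x10000 (64KB), unmapping [0x4000, 0x6000) shrinks the
 * original entry to [0x0, 0x4000) and inserts a new entry for
 * [0x6000, 0x10000), leaving two nodes in the dma_list tree.
 */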
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
			     struct vfio_iommu_type1_dma_unmap *unmap)
{
	uint64_t mask;
	struct vfio_dma *dma;
	size_t unmapped = 0, size;
	int ret = 0;

	mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1;

	if (unmap->iova & mask)
		return -EINVAL;
	if (unmap->size & mask)
		return -EINVAL;

	WARN_ON(mask & PAGE_MASK);

	mutex_lock(&iommu->lock);

	while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
		size = unmap->size;
		ret = vfio_remove_dma_overlap(iommu, unmap->iova, &size, dma);
		if (ret || !size)
			break;
		unmapped += size;
	}

	mutex_unlock(&iommu->lock);

	/*
	 * We may unmap more than requested, update the unmap struct so
	 * userspace can know.
	 */
	unmap->size = unmapped;

	return ret;
}
/*
 * Turns out AMD IOMMU has a page table bug where it won't map large pages
 * to a region that previously mapped smaller pages.  This should be fixed
 * soon, so this is just a temporary workaround to break mappings down into
 * PAGE_SIZE.  Better to map smaller pages than nothing.
 */
static int map_try_harder(struct vfio_iommu *iommu, dma_addr_t iova,
			  unsigned long pfn, long npage, int prot)
{
	long i;
	int ret = 0;

	for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
		ret = iommu_map(iommu->domain, iova,
				(phys_addr_t)pfn << PAGE_SHIFT,
				PAGE_SIZE, prot);
		if (ret)
			break;
	}

	for (; i < npage && i > 0; i--, iova -= PAGE_SIZE)
		iommu_unmap(iommu->domain, iova, PAGE_SIZE);

	return ret;
}
static int vfio_dma_do_map(struct vfio_iommu *iommu,
			   struct vfio_iommu_type1_dma_map *map)
{
	dma_addr_t end, iova;
	unsigned long vaddr = map->vaddr;
	size_t size = map->size;
	long npage;
	int ret = 0, prot = 0;
	uint64_t mask;

	end = map->iova + map->size;

	mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1;

	/* READ/WRITE from device perspective */
	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
		prot |= IOMMU_WRITE;
	if (map->flags & VFIO_DMA_MAP_FLAG_READ)
		prot |= IOMMU_READ;

	if (!prot)
		return -EINVAL; /* No READ/WRITE? */

	if (vaddr & mask)
		return -EINVAL;
	if (map->iova & mask)
		return -EINVAL;
	if (!map->size || map->size & mask)
		return -EINVAL;

	WARN_ON(mask & PAGE_MASK);

	/* Don't allow IOVA wrap */
	if (end && end < map->iova)
		return -EINVAL;

	/* Don't allow virtual address wrap */
	if (vaddr + map->size && vaddr + map->size < vaddr)
		return -EINVAL;

	mutex_lock(&iommu->lock);

	if (vfio_find_dma(iommu, map->iova, map->size)) {
		mutex_unlock(&iommu->lock);
		return -EEXIST;
	}

	for (iova = map->iova; iova < end; iova += size, vaddr += size) {
		struct vfio_dma *dma = NULL;
		unsigned long pfn;
		long i;

		/* Pin a contiguous chunk of memory */
		npage = vfio_pin_pages(vaddr, (end - iova) >> PAGE_SHIFT,
				       prot, &pfn);
		if (npage <= 0) {
			WARN_ON(!npage);
			ret = (int)npage;
			break;
		}

		/* Verify pages are not already mapped */
		for (i = 0; i < npage; i++) {
			if (iommu_iova_to_phys(iommu->domain,
					       iova + (i << PAGE_SHIFT))) {
				vfio_unpin_pages(pfn, npage, prot, true);
				ret = -EBUSY;
				break;
			}
		}
		if (ret)
			break;

		ret = iommu_map(iommu->domain, iova,
				(phys_addr_t)pfn << PAGE_SHIFT,
				npage << PAGE_SHIFT, prot);
		if (ret) {
			if (ret != -EBUSY ||
			    map_try_harder(iommu, iova, pfn, npage, prot)) {
				vfio_unpin_pages(pfn, npage, prot, true);
				break;
			}
		}

		size = npage << PAGE_SHIFT;

		/*
		 * Check if we abut a region below - nothing below 0.
		 * This is the most likely case when mapping chunks of
		 * physically contiguous regions within a virtual address
		 * range.  Update the abutting entry in place since iova
		 * doesn't change.
		 */
		if (likely(iova)) {
			struct vfio_dma *tmp;

			tmp = vfio_find_dma(iommu, iova - 1, 1);
			if (tmp && tmp->prot == prot &&
			    tmp->vaddr + tmp->size == vaddr) {
				tmp->size += size;
				iova = tmp->iova;
				size = tmp->size;
				vaddr = tmp->vaddr;
				dma = tmp;
			}
		}

		/* Check if we abut a region above - nothing above ~0 + 1 */
		if (likely(iova + size)) {
			struct vfio_dma *tmp;

			tmp = vfio_find_dma(iommu, iova + size, 1);
			if (tmp && tmp->prot == prot &&
			    tmp->vaddr == vaddr + size) {
				vfio_remove_dma(iommu, tmp);
				if (dma) {
					dma->size += tmp->size;
					kfree(tmp);
				} else {
					size += tmp->size;
					tmp->size = size;
					tmp->iova = iova;
					tmp->vaddr = vaddr;
					dma = tmp;
				}
			}
		}

		if (!dma) {
			dma = kzalloc(sizeof(*dma), GFP_KERNEL);
			if (!dma) {
				iommu_unmap(iommu->domain, iova, size);
				vfio_unpin_pages(pfn, npage, prot, true);
				ret = -ENOMEM;
				break;
			}

			dma->size = size;
			dma->iova = iova;
			dma->vaddr = vaddr;
			dma->prot = prot;
			vfio_insert_dma(iommu, dma);
		}
	}

	if (ret) {
		struct vfio_dma *tmp;

		iova = map->iova;
		size = map->size;
		while ((tmp = vfio_find_dma(iommu, iova, size))) {
			if (vfio_remove_dma_overlap(iommu, iova, &size, tmp)) {
				pr_warn("%s: Error rolling back failed map\n",
					__func__);
				break;
			}
		}
	}

	mutex_unlock(&iommu->lock);
	return ret;
}
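/*
 * Because of the abutting checks in vfio_dma_do_map() above, a virtually
 * contiguous buffer that gets pinned in several physically contiguous
 * chunks (say two 2MB pieces at iova 0x0 and 0x200000) still ends up as a
 * single 4MB vfio_dma entry rather than one entry per pinned chunk.
 */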
static int vfio_iommu_type1_attach_group(void *iommu_data,
					 struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_group *group, *tmp;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return -ENOMEM;

	mutex_lock(&iommu->lock);

	list_for_each_entry(tmp, &iommu->group_list, next) {
		if (tmp->iommu_group == iommu_group) {
			mutex_unlock(&iommu->lock);
			kfree(group);
			return -EINVAL;
		}
	}

	/*
	 * TODO: Domains have capabilities that might change as we add
	 * groups (see iommu->cache, currently never set).  Check for
	 * them and potentially disallow groups to be attached when it
	 * would change capabilities (ugh).
	 */
	ret = iommu_attach_group(iommu->domain, iommu_group);
	if (ret) {
		mutex_unlock(&iommu->lock);
		kfree(group);
		return ret;
	}

	group->iommu_group = iommu_group;
	list_add(&group->next, &iommu->group_list);

	mutex_unlock(&iommu->lock);

	return 0;
}
static void vfio_iommu_type1_detach_group(void *iommu_data,
					  struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_group *group;

	mutex_lock(&iommu->lock);

	list_for_each_entry(group, &iommu->group_list, next) {
		if (group->iommu_group == iommu_group) {
			iommu_detach_group(iommu->domain, iommu_group);
			list_del(&group->next);
			kfree(group);
			break;
		}
	}

	mutex_unlock(&iommu->lock);
}
static void *vfio_iommu_type1_open(unsigned long arg)
{
	struct vfio_iommu *iommu;

	if (arg != VFIO_TYPE1_IOMMU)
		return ERR_PTR(-EINVAL);

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&iommu->group_list);
	iommu->dma_list = RB_ROOT;
	mutex_init(&iommu->lock);

	/*
	 * Wish we didn't have to know about bus_type here.
	 */
	iommu->domain = iommu_domain_alloc(&pci_bus_type);
	if (!iommu->domain) {
		kfree(iommu);
		return ERR_PTR(-EIO);
	}

	/*
	 * Wish we could specify required capabilities rather than create
	 * a domain, see what comes out and hope it doesn't change along
	 * the way.  Fortunately we know interrupt remapping is global for
	 * our IOMMUs.
	 */
	if (!allow_unsafe_interrupts &&
	    !iommu_domain_has_cap(iommu->domain, IOMMU_CAP_INTR_REMAP)) {
		pr_warn("%s: No interrupt remapping support.  Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
			__func__);
		iommu_domain_free(iommu->domain);
		kfree(iommu);
		return ERR_PTR(-EPERM);
	}

	return iommu;
}
static void vfio_iommu_type1_release(void *iommu_data)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_group *group, *group_tmp;
	struct rb_node *node;

	list_for_each_entry_safe(group, group_tmp, &iommu->group_list, next) {
		iommu_detach_group(iommu->domain, group->iommu_group);
		list_del(&group->next);
		kfree(group);
	}

	while ((node = rb_first(&iommu->dma_list))) {
		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
		size_t size = dma->size;
		vfio_remove_dma_overlap(iommu, dma->iova, &size, dma);
		if (WARN_ON(!size))
			break;
	}

	iommu_domain_free(iommu->domain);
	iommu->domain = NULL;
	kfree(iommu);
}
static long vfio_iommu_type1_ioctl(void *iommu_data,
				   unsigned int cmd, unsigned long arg)
{
	struct vfio_iommu *iommu = iommu_data;
	unsigned long minsz;

	if (cmd == VFIO_CHECK_EXTENSION) {
		switch (arg) {
		case VFIO_TYPE1_IOMMU:
			return 1;
		default:
			return 0;
		}
	} else if (cmd == VFIO_IOMMU_GET_INFO) {
		struct vfio_iommu_type1_info info;

		minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = 0;

		info.iova_pgsizes = iommu->domain->ops->pgsize_bitmap;

		return copy_to_user((void __user *)arg, &info, minsz);

	} else if (cmd == VFIO_IOMMU_MAP_DMA) {
		struct vfio_iommu_type1_dma_map map;
		uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&map, (void __user *)arg, minsz))
			return -EFAULT;

		if (map.argsz < minsz || map.flags & ~mask)
			return -EINVAL;

		return vfio_dma_do_map(iommu, &map);

	} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
		struct vfio_iommu_type1_dma_unmap unmap;
		long ret;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);

		if (copy_from_user(&unmap, (void __user *)arg, minsz))
			return -EFAULT;

		if (unmap.argsz < minsz || unmap.flags)
			return -EINVAL;

		ret = vfio_dma_do_unmap(iommu, &unmap);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &unmap, minsz);
	}

	return -ENOTTY;
}
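/*
 * Illustrative userspace use of the VFIO_IOMMU_GET_INFO handler above (not
 * part of this driver), where "container" is an open container fd with a
 * Type1 IOMMU already set:
 *
 *	struct vfio_iommu_type1_info info = { .argsz = sizeof(info) };
 *
 *	ioctl(container, VFIO_IOMMU_GET_INFO, &info);
 *
 * info.iova_pgsizes then holds the IOMMU page size bitmap reported from
 * iommu->domain->ops->pgsize_bitmap.
 */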
static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
	.name		= "vfio-iommu-type1",
	.owner		= THIS_MODULE,
	.open		= vfio_iommu_type1_open,
	.release	= vfio_iommu_type1_release,
	.ioctl		= vfio_iommu_type1_ioctl,
	.attach_group	= vfio_iommu_type1_attach_group,
	.detach_group	= vfio_iommu_type1_detach_group,
};
static int __init vfio_iommu_type1_init(void)
{
	if (!iommu_present(&pci_bus_type))
		return -ENODEV;

	return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
}

static void __exit vfio_iommu_type1_cleanup(void)
{
	vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
}

module_init(vfio_iommu_type1_init);
module_exit(vfio_iommu_type1_cleanup);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);