/*
 * VFIO: IOMMU DMA mapping support for Type1 IOMMU
 *
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 * Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 *
 * We arbitrarily define a Type1 IOMMU as one matching the below code.
 * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
 * VT-d, but that makes it harder to re-use as theoretically anyone
 * implementing a similar IOMMU could make use of this. We expect the
 * IOMMU to support the IOMMU API and have few to no restrictions around
 * the IOVA range that can be mapped. The Type1 IOMMU is currently
 * optimized for relatively static mappings of a userspace process with
 * userspace pages pinned into memory. We also assume devices and IOMMU
 * domains are PCI based as the IOMMU API is still centered around a
 * device/bus interface rather than a group interface.
 */

#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>

#define DRIVER_VERSION	"0.2"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"Type1 IOMMU driver for VFIO"

static bool allow_unsafe_interrupts;
module_param_named(allow_unsafe_interrupts,
		   allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_interrupts,
		 "Enable VFIO IOMMU support on platforms without interrupt remapping support.");

static bool disable_hugepages;
module_param_named(disable_hugepages,
		   disable_hugepages, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_hugepages,
		 "Disable VFIO IOMMU support for IOMMU hugepages.");

struct vfio_iommu {
	struct list_head	domain_list;
	struct mutex		lock;
	struct rb_root		dma_list;
	bool			v2;
	bool			nesting;
};

struct vfio_domain {
	struct iommu_domain	*domain;
	struct list_head	next;
	struct list_head	group_list;
	int			prot;	/* IOMMU_CACHE */
	bool			fgsp;	/* Fine-grained super pages */
};

struct vfio_dma {
	struct rb_node		node;
	dma_addr_t		iova;	/* Device address */
	unsigned long		vaddr;	/* Process virtual addr */
	size_t			size;	/* Map size (bytes) */
	int			prot;	/* IOMMU_READ/WRITE */
};

struct vfio_group {
	struct iommu_group	*iommu_group;
	struct list_head	next;
};

/*
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

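/* Range lookup in the iova-ordered, non-overlapping dma_list rb-tree */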
static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
				      dma_addr_t start, size_t size)
{
	struct rb_node *node = iommu->dma_list.rb_node;

	while (node) {
		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);

		if (start + size <= dma->iova)
			node = node->rb_left;
		else if (start >= dma->iova + dma->size)
			node = node->rb_right;
		else
			return dma;
	}

	return NULL;
}

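/*
 * Insert a new mapping into the iova-ordered rb-tree. Callers ensure the new
 * range does not overlap an existing entry (see vfio_find_dma).
 */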
static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
{
	struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
	struct vfio_dma *dma;

	while (*link) {
		parent = *link;
		dma = rb_entry(parent, struct vfio_dma, node);

		if (new->iova + new->size <= dma->iova)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &iommu->dma_list);
}

static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
{
	rb_erase(&old->node, &iommu->dma_list);
}

struct vwork {
	struct mm_struct	*mm;
	long			npage;
	struct work_struct	work;
};

/* delayed decrement/increment for locked_vm */
static void vfio_lock_acct_bg(struct work_struct *work)
{
	struct vwork *vwork = container_of(work, struct vwork, work);
	struct mm_struct *mm;

	mm = vwork->mm;
	down_write(&mm->mmap_sem);
	mm->locked_vm += vwork->npage;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(vwork);
}

static void vfio_lock_acct(long npage)
{
	struct vwork *vwork;
	struct mm_struct *mm;

	if (!current->mm || !npage)
		return; /* process exited or nothing to do */

	if (down_write_trylock(&current->mm->mmap_sem)) {
		current->mm->locked_vm += npage;
		up_write(&current->mm->mmap_sem);
		return;
	}

	/*
	 * Couldn't get mmap_sem lock, so must setup to update
	 * mm->locked_vm later. If locked_vm were atomic, we
	 * wouldn't need this silliness
	 */
	vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
	if (!vwork)
		return;
	mm = get_task_mm(current);
	if (!mm) {
		kfree(vwork);
		return;
	}
	INIT_WORK(&vwork->work, vfio_lock_acct_bg);
	vwork->mm = mm;
	vwork->npage = npage;
	schedule_work(&vwork->work);
}

/*
 * Some mappings aren't backed by a struct page, for example an mmap'd
 * MMIO range for our own or another device. These use a different
 * pfn conversion and shouldn't be tracked as locked pages.
 */
static bool is_invalid_reserved_pfn(unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		bool reserved;
		struct page *tail = pfn_to_page(pfn);
		struct page *head = compound_head(tail);
		reserved = !!(PageReserved(head));
		if (head != tail) {
			/*
			 * "head" is not a dangling pointer
			 * (compound_head takes care of that)
			 * but the hugepage may have been split
			 * from under us (and we may not hold a
			 * reference count on the head page so it can
			 * be reused before we run PageReferenced), so
			 * we have to check PageTail before returning
			 * what we just read.
			 */
			smp_rmb();
			if (PageTail(tail))
				return reserved;
		}
		return PageReserved(tail);
	}

	return true;
}

static int put_pfn(unsigned long pfn, int prot)
{
	if (!is_invalid_reserved_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (prot & IOMMU_WRITE)
			SetPageDirty(page);
		put_page(page);
		return 1;
	}
	return 0;
}

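/*
 * Translate a user virtual address to a pfn. Normal memory is pinned via
 * get_user_pages_fast(); VM_PFNMAP vmas (e.g. MMIO mappings) have no struct
 * page, so fall back to reading the pfn straight from the vma, provided it
 * is a reserved/invalid pfn that we won't account as locked memory.
 */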
static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
{
	struct page *page[1];
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	if (get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), page) == 1) {
		*pfn = page_to_pfn(page[0]);
		return 0;
	}

	down_read(&current->mm->mmap_sem);

	vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);

	if (vma && vma->vm_flags & VM_PFNMAP) {
		*pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		if (is_invalid_reserved_pfn(*pfn))
			ret = 0;
	}

	up_read(&current->mm->mmap_sem);

	return ret;
}

/*
 * Attempt to pin pages. We really don't want to track all the pfns and
 * the iommu can only map chunks of consecutive pfns anyway, so get the
 * first page and all consecutive pages with the same locking.
 */
static long vfio_pin_pages(unsigned long vaddr, long npage,
			   int prot, unsigned long *pfn_base)
{
	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	bool lock_cap = capable(CAP_IPC_LOCK);
	long ret, i;
	bool rsvd;

	if (!current->mm)
		return -ENODEV;

	ret = vaddr_get_pfn(vaddr, prot, pfn_base);
	if (ret)
		return ret;

	rsvd = is_invalid_reserved_pfn(*pfn_base);

	if (!rsvd && !lock_cap && current->mm->locked_vm + 1 > limit) {
		put_pfn(*pfn_base, prot);
		pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
			limit << PAGE_SHIFT);
		return -ENOMEM;
	}

	if (unlikely(disable_hugepages)) {
		if (!rsvd)
			vfio_lock_acct(1);
		return 1;
	}

	/* Lock all the consecutive pages from pfn_base */
	for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
		unsigned long pfn = 0;

		ret = vaddr_get_pfn(vaddr, prot, &pfn);
		if (ret)
			break;

		if (pfn != *pfn_base + i ||
		    rsvd != is_invalid_reserved_pfn(pfn)) {
			put_pfn(pfn, prot);
			break;
		}

		if (!rsvd && !lock_cap &&
		    current->mm->locked_vm + i + 1 > limit) {
			put_pfn(pfn, prot);
			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
				__func__, limit << PAGE_SHIFT);
			break;
		}
	}

	if (!rsvd)
		vfio_lock_acct(i);

	return i;
}

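/* Unpin a run of contiguous pfns, optionally crediting locked_vm immediately */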
static long vfio_unpin_pages(unsigned long pfn, long npage,
			     int prot, bool do_accounting)
{
	unsigned long unlocked = 0;
	long i;

	for (i = 0; i < npage; i++)
		unlocked += put_pfn(pfn++, prot);

	if (do_accounting)
		vfio_lock_acct(-unlocked);

	return unlocked;
}

static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
	dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
	struct vfio_domain *domain, *d;
	long unlocked = 0;

	if (!dma->size)
		return;
	/*
	 * We use the IOMMU to track the physical addresses, otherwise we'd
	 * need a much more complicated tracking system. Unfortunately that
	 * means we need to use one of the iommu domains to figure out the
	 * pfns to unpin. The rest need to be unmapped in advance so we have
	 * no iommu translations remaining when the pages are unpinned.
	 */
	domain = d = list_first_entry(&iommu->domain_list,
				      struct vfio_domain, next);

	list_for_each_entry_continue(d, &iommu->domain_list, next) {
		iommu_unmap(d->domain, dma->iova, dma->size);
		cond_resched();
	}

	while (iova < end) {
		size_t unmapped, len;
		phys_addr_t phys, next;

		phys = iommu_iova_to_phys(domain->domain, iova);
		if (WARN_ON(!phys)) {
			iova += PAGE_SIZE;
			continue;
		}

		/*
		 * To optimize for fewer iommu_unmap() calls, each of which
		 * may require hardware cache flushing, try to find the
		 * largest contiguous physical memory chunk to unmap.
		 */
		for (len = PAGE_SIZE;
		     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
			next = iommu_iova_to_phys(domain->domain, iova + len);
			if (next != phys + len)
				break;
		}

		unmapped = iommu_unmap(domain->domain, iova, len);
		if (WARN_ON(!unmapped))
			break;

		unlocked += vfio_unpin_pages(phys >> PAGE_SHIFT,
					     unmapped >> PAGE_SHIFT,
					     dma->prot, false);
		iova += unmapped;

		cond_resched();
	}

	vfio_lock_acct(-unlocked);
}

static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
	vfio_unmap_unpin(iommu, dma);
	vfio_unlink_dma(iommu, dma);
	kfree(dma);
}

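/*
 * The IOMMU page sizes usable for mapping are the intersection of the page
 * sizes supported by every domain in the container.
 */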
static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	unsigned long bitmap = PAGE_MASK;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next)
		bitmap &= domain->domain->ops->pgsize_bitmap;
	mutex_unlock(&iommu->lock);

	return bitmap;
}

static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
			     struct vfio_iommu_type1_dma_unmap *unmap)
{
	uint64_t mask;
	struct vfio_dma *dma;
	size_t unmapped = 0;
	int ret = 0;

	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

	if (unmap->iova & mask)
		return -EINVAL;
	if (!unmap->size || unmap->size & mask)
		return -EINVAL;

	WARN_ON(mask & PAGE_MASK);

	mutex_lock(&iommu->lock);

	/*
	 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
	 * avoid tracking individual mappings. This means that the granularity
	 * of the original mapping was lost and the user was allowed to attempt
	 * to unmap any range. Depending on the contiguousness of physical
	 * memory and page sizes supported by the IOMMU, arbitrary unmaps may
	 * or may not have worked. We only guaranteed unmap granularity
	 * matching the original mapping; even though it was untracked here,
	 * the original mappings are reflected in IOMMU mappings. This
	 * resulted in a couple unusual behaviors. First, if a range is not
	 * able to be unmapped, ex. a set of 4k pages that was mapped as a
	 * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
	 * a zero sized unmap. Also, if an unmap request overlaps the first
	 * address of a hugepage, the IOMMU will unmap the entire hugepage.
	 * This also returns success and the returned unmap size reflects the
	 * actual size unmapped.
	 *
	 * We attempt to maintain compatibility with this "v1" interface, but
	 * we take control out of the hands of the IOMMU. Therefore, an unmap
	 * request offset from the beginning of the original mapping will
	 * return success with zero sized unmap. And an unmap request covering
	 * the first iova of the mapping will unmap the entire range.
	 *
	 * The v2 version of this interface intends to be more deterministic.
	 * Unmap requests must fully cover previous mappings. Multiple
	 * mappings may still be unmapped by specifying large ranges, but there
	 * must not be any previous mappings bisected by the range. An error
	 * will be returned if these conditions are not met. The v2 interface
	 * will only return success and a size of zero if there were no
	 * mappings within the range.
	 */
	if (iommu->v2) {
		dma = vfio_find_dma(iommu, unmap->iova, 0);
		if (dma && dma->iova != unmap->iova) {
			ret = -EINVAL;
			goto unlock;
		}
		dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
		if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
			ret = -EINVAL;
			goto unlock;
		}
	}

	while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
		if (!iommu->v2 && unmap->iova > dma->iova)
			break;
		unmapped += dma->size;
		vfio_remove_dma(iommu, dma);
	}

unlock:
	mutex_unlock(&iommu->lock);

	/* Report how much was unmapped */
	unmap->size = unmapped;

	return ret;
}

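/*
 * For illustration only (userspace side, not part of this driver): under the
 * v2 semantics described above, an unmap is typically issued as
 *
 *	struct vfio_iommu_type1_dma_unmap unmap = {
 *		.argsz = sizeof(unmap),
 *		.iova = iova,	(start of a previously created mapping)
 *		.size = size,	(must fully cover previous mappings)
 *	};
 *	ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
 *
 * where container_fd is the caller's VFIO container file descriptor. On
 * return, unmap.size holds the number of bytes actually unmapped.
 */
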
/*
 * Turns out AMD IOMMU has a page table bug where it won't map large pages
 * to a region that previously mapped smaller pages. This should be fixed
 * soon, so this is just a temporary workaround to break mappings down into
 * PAGE_SIZE. Better to map smaller pages than nothing.
 */
static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova,
			  unsigned long pfn, long npage, int prot)
{
	long i;
	int ret;

	for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
		ret = iommu_map(domain->domain, iova,
				(phys_addr_t)pfn << PAGE_SHIFT,
				PAGE_SIZE, prot | domain->prot);
		if (ret)
			break;
	}

	for (; i < npage && i > 0; i--, iova -= PAGE_SIZE)
		iommu_unmap(domain->domain, iova, PAGE_SIZE);

	return ret;
}

static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
			  unsigned long pfn, long npage, int prot)
{
	struct vfio_domain *d;
	int ret;

	list_for_each_entry(d, &iommu->domain_list, next) {
		ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
				npage << PAGE_SHIFT, prot | d->prot);
		if (ret) {
			if (ret != -EBUSY ||
			    map_try_harder(d, iova, pfn, npage, prot))
				goto unwind;
		}

		cond_resched();
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
		iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);

	return ret;
}

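/*
 * Handle VFIO_IOMMU_MAP_DMA: validate the request against the supported page
 * sizes, insert a tracking structure, then pin the user memory in physically
 * contiguous chunks and map each chunk into every IOMMU domain.
 */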
static int vfio_dma_do_map(struct vfio_iommu *iommu,
			   struct vfio_iommu_type1_dma_map *map)
{
	dma_addr_t iova = map->iova;
	unsigned long vaddr = map->vaddr;
	size_t size = map->size;
	long npage;
	int ret = 0, prot = 0;
	uint64_t mask;
	struct vfio_dma *dma;
	unsigned long pfn;

	/* Verify that none of our __u64 fields overflow */
	if (map->size != size || map->vaddr != vaddr || map->iova != iova)
		return -EINVAL;

	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

	WARN_ON(mask & PAGE_MASK);

	/* READ/WRITE from device perspective */
	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
		prot |= IOMMU_WRITE;
	if (map->flags & VFIO_DMA_MAP_FLAG_READ)
		prot |= IOMMU_READ;

	if (!prot || !size || (size | iova | vaddr) & mask)
		return -EINVAL;

	/* Don't allow IOVA or virtual address wrap */
	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
		return -EINVAL;

	mutex_lock(&iommu->lock);

	if (vfio_find_dma(iommu, iova, size)) {
		mutex_unlock(&iommu->lock);
		return -EEXIST;
	}

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma) {
		mutex_unlock(&iommu->lock);
		return -ENOMEM;
	}

	dma->iova = iova;
	dma->vaddr = vaddr;
	dma->prot = prot;

	/* Insert zero-sized and grow as we map chunks of it */
	vfio_link_dma(iommu, dma);

	while (size) {
		/* Pin a contiguous chunk of memory */
		npage = vfio_pin_pages(vaddr + dma->size,
				       size >> PAGE_SHIFT, prot, &pfn);
		if (npage <= 0) {
			WARN_ON(!npage);
			ret = (int)npage;
			break;
		}

		/* Map it! */
		ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, prot);
		if (ret) {
			vfio_unpin_pages(pfn, npage, prot, true);
			break;
		}

		size -= npage << PAGE_SHIFT;
		dma->size += npage << PAGE_SHIFT;
	}

	if (ret)
		vfio_remove_dma(iommu, dma);

	mutex_unlock(&iommu->lock);
	return ret;
}

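/*
 * iommu_group_for_each_dev() callback: all devices in the group must share a
 * single bus_type so that one IOMMU domain can be allocated for the group.
 */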
static int vfio_bus_type(struct device *dev, void *data)
{
	struct bus_type **bus = data;

	if (*bus && *bus != dev->bus)
		return -EINVAL;

	*bus = dev->bus;

	return 0;
}

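/*
 * Replay existing mappings into a newly added domain. An existing domain is
 * used to resolve iova to phys, and physically contiguous runs are merged
 * into a single iommu_map() call.
 */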
static int vfio_iommu_replay(struct vfio_iommu *iommu,
			     struct vfio_domain *domain)
{
	struct vfio_domain *d;
	struct rb_node *n;
	int ret;

	/* Arbitrarily pick the first domain in the list for lookups */
	d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
	n = rb_first(&iommu->dma_list);

	/* If there's not a domain, there better not be any mappings */
	if (WARN_ON(n && !d))
		return -EINVAL;

	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		dma_addr_t iova;

		dma = rb_entry(n, struct vfio_dma, node);
		iova = dma->iova;

		while (iova < dma->iova + dma->size) {
			phys_addr_t phys = iommu_iova_to_phys(d->domain, iova);
			size_t size;

			if (WARN_ON(!phys)) {
				iova += PAGE_SIZE;
				continue;
			}

			size = PAGE_SIZE;

			while (iova + size < dma->iova + dma->size &&
			       phys + size == iommu_iova_to_phys(d->domain,
								 iova + size))
				size += PAGE_SIZE;

			ret = iommu_map(domain->domain, iova, phys,
					size, dma->prot | domain->prot);
			if (ret)
				return ret;

			iova += size;
		}
	}

	return 0;
}

/*
 * We change our unmap behavior slightly depending on whether the IOMMU
 * supports fine-grained superpages. IOMMUs like AMD-Vi will use a superpage
 * for practically any contiguous power-of-two mapping we give it. This means
 * we don't need to look for contiguous chunks ourselves to make unmapping
 * more efficient. On IOMMUs with coarse-grained super pages, like Intel VT-d
 * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
 * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
 * hugetlbfs is in use.
 */
static void vfio_test_domain_fgsp(struct vfio_domain *domain)
{
	struct page *pages;
	int ret, order = get_order(PAGE_SIZE * 2);

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages)
		return;

	ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
			IOMMU_READ | IOMMU_WRITE | domain->prot);
	if (!ret) {
		size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);

		if (unmapped == PAGE_SIZE)
			iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
		else
			domain->fgsp = true;
	}

	__free_pages(pages, order);
}

static int vfio_iommu_type1_attach_group(void *iommu_data,
					 struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_group *group, *g;
	struct vfio_domain *domain, *d;
	struct bus_type *bus = NULL;
	int ret;

	mutex_lock(&iommu->lock);

	list_for_each_entry(d, &iommu->domain_list, next) {
		list_for_each_entry(g, &d->group_list, next) {
			if (g->iommu_group != iommu_group)
				continue;

			mutex_unlock(&iommu->lock);
			return -EINVAL;
		}
	}

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!group || !domain) {
		ret = -ENOMEM;
		goto out_free;
	}

	group->iommu_group = iommu_group;

	/* Determine bus_type in order to allocate a domain */
	ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
	if (ret)
		goto out_free;

	domain->domain = iommu_domain_alloc(bus);
	if (!domain->domain) {
		ret = -EIO;
		goto out_free;
	}

	if (iommu->nesting) {
		int attr = 1;

		ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING,
					    &attr);
		if (ret)
			goto out_domain;
	}

	ret = iommu_attach_group(domain->domain, iommu_group);
	if (ret)
		goto out_domain;

	INIT_LIST_HEAD(&domain->group_list);
	list_add(&group->next, &domain->group_list);

	if (!allow_unsafe_interrupts &&
	    !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
		pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
			__func__);
		ret = -EPERM;
		goto out_detach;
	}

	if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		domain->prot |= IOMMU_CACHE;

	/*
	 * Try to match an existing compatible domain. We don't want to
	 * preclude an IOMMU driver supporting multiple bus_types and being
	 * able to include different bus_types in the same IOMMU domain, so
	 * we test whether the domains use the same iommu_ops rather than
	 * testing if they're on the same bus_type.
	 */
	list_for_each_entry(d, &iommu->domain_list, next) {
		if (d->domain->ops == domain->domain->ops &&
		    d->prot == domain->prot) {
			iommu_detach_group(domain->domain, iommu_group);
			if (!iommu_attach_group(d->domain, iommu_group)) {
				list_add(&group->next, &d->group_list);
				iommu_domain_free(domain->domain);
				kfree(domain);
				mutex_unlock(&iommu->lock);
				return 0;
			}

			ret = iommu_attach_group(domain->domain, iommu_group);
			if (ret)
				goto out_domain;
		}
	}

	vfio_test_domain_fgsp(domain);

	/* replay mappings on new domains */
	ret = vfio_iommu_replay(iommu, domain);
	if (ret)
		goto out_detach;

	list_add(&domain->next, &iommu->domain_list);

	mutex_unlock(&iommu->lock);

	return 0;

out_detach:
	iommu_detach_group(domain->domain, iommu_group);
out_domain:
	iommu_domain_free(domain->domain);
out_free:
	kfree(domain);
	kfree(group);
	mutex_unlock(&iommu->lock);
	return ret;
}

static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
{
	struct rb_node *node;

	while ((node = rb_first(&iommu->dma_list)))
		vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
}

static void vfio_iommu_type1_detach_group(void *iommu_data,
					  struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain;
	struct vfio_group *group;

	mutex_lock(&iommu->lock);

	list_for_each_entry(domain, &iommu->domain_list, next) {
		list_for_each_entry(group, &domain->group_list, next) {
			if (group->iommu_group != iommu_group)
				continue;

			iommu_detach_group(domain->domain, iommu_group);
			list_del(&group->next);
			kfree(group);
			/*
			 * Group ownership provides privilege, if the group
			 * list is empty, the domain goes away. If it's the
			 * last domain, then all the mappings go away too.
			 */
			if (list_empty(&domain->group_list)) {
				if (list_is_singular(&iommu->domain_list))
					vfio_iommu_unmap_unpin_all(iommu);
				iommu_domain_free(domain->domain);
				list_del(&domain->next);
				kfree(domain);
			}
			goto done;
		}
	}

done:
	mutex_unlock(&iommu->lock);
}

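/*
 * Allocate per-container state. The open() argument selects the requested
 * extension: plain Type1, Type1 v2, or nesting (which implies v2).
 */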
static void *vfio_iommu_type1_open(unsigned long arg)
{
	struct vfio_iommu *iommu;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	switch (arg) {
	case VFIO_TYPE1_IOMMU:
		break;
	case VFIO_TYPE1_NESTING_IOMMU:
		iommu->nesting = true;
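		/* nesting implies v2 semantics; fall through */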
	case VFIO_TYPE1v2_IOMMU:
		iommu->v2 = true;
		break;
	default:
		kfree(iommu);
		return ERR_PTR(-EINVAL);
	}

	INIT_LIST_HEAD(&iommu->domain_list);
	iommu->dma_list = RB_ROOT;
	mutex_init(&iommu->lock);

	return iommu;
}

static void vfio_iommu_type1_release(void *iommu_data)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain, *domain_tmp;
	struct vfio_group *group, *group_tmp;

	vfio_iommu_unmap_unpin_all(iommu);

	list_for_each_entry_safe(domain, domain_tmp,
				 &iommu->domain_list, next) {
		list_for_each_entry_safe(group, group_tmp,
					 &domain->group_list, next) {
			iommu_detach_group(domain->domain, group->iommu_group);
			list_del(&group->next);
			kfree(group);
		}
		iommu_domain_free(domain->domain);
		list_del(&domain->next);
		kfree(domain);
	}

	kfree(iommu);
}

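/*
 * VFIO_DMA_CC_IOMMU is only advertised if every domain in the container
 * enforces cache coherency (IOMMU_CACHE).
 */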
static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	int ret = 1;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next) {
		if (!(domain->prot & IOMMU_CACHE)) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&iommu->lock);

	return ret;
}

static long vfio_iommu_type1_ioctl(void *iommu_data,
				   unsigned int cmd, unsigned long arg)
{
	struct vfio_iommu *iommu = iommu_data;
	unsigned long minsz;

	if (cmd == VFIO_CHECK_EXTENSION) {
		switch (arg) {
		case VFIO_TYPE1_IOMMU:
		case VFIO_TYPE1v2_IOMMU:
		case VFIO_TYPE1_NESTING_IOMMU:
			return 1;
		case VFIO_DMA_CC_IOMMU:
			if (!iommu)
				return 0;
			return vfio_domains_have_iommu_cache(iommu);
		default:
			return 0;
		}
	} else if (cmd == VFIO_IOMMU_GET_INFO) {
		struct vfio_iommu_type1_info info;

		minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = 0;

		info.iova_pgsizes = vfio_pgsize_bitmap(iommu);

		return copy_to_user((void __user *)arg, &info, minsz);

	} else if (cmd == VFIO_IOMMU_MAP_DMA) {
		struct vfio_iommu_type1_dma_map map;
		uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&map, (void __user *)arg, minsz))
			return -EFAULT;

		if (map.argsz < minsz || map.flags & ~mask)
			return -EINVAL;

		return vfio_dma_do_map(iommu, &map);

	} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
		struct vfio_iommu_type1_dma_unmap unmap;
		long ret;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);

		if (copy_from_user(&unmap, (void __user *)arg, minsz))
			return -EFAULT;

		if (unmap.argsz < minsz || unmap.flags)
			return -EINVAL;

		ret = vfio_dma_do_unmap(iommu, &unmap);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &unmap, minsz);
	}

	return -ENOTTY;
}

static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
	.name		= "vfio-iommu-type1",
	.owner		= THIS_MODULE,
	.open		= vfio_iommu_type1_open,
	.release	= vfio_iommu_type1_release,
	.ioctl		= vfio_iommu_type1_ioctl,
	.attach_group	= vfio_iommu_type1_attach_group,
	.detach_group	= vfio_iommu_type1_detach_group,
};

static int __init vfio_iommu_type1_init(void)
{
	return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
}

static void __exit vfio_iommu_type1_cleanup(void)
{
	vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
}

module_init(vfio_iommu_type1_init);
module_exit(vfio_iommu_type1_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);