Check agaw is sufficient for mapped memory
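This page shows the cumulative gitweb diff for drivers/pci/intel-iommu.c across a series that prepares the VT-d driver for domains spanning multiple IOMMU units (the "virtual machine" domains used for device assignment). In outline: struct dmar_domain gains an IOMMU reference count, a lock protecting the set of IOMMUs serving the domain, and a max_addr high-water mark; cache flushing for page-table updates moves to a per-domain domain_flush_cache() helper; context mapping allocates a per-IOMMU domain id and skips top page-table levels on units with a smaller AGAW; and the exported API is reworked (intel_iommu_alloc_domain(), intel_iommu_attach_device(), intel_iommu_map_address(), and friends) so that, per the subject line, the relevant AGAW is checked against the highest mapped address.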
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index ffbe4c57372926ad3a885ce80ed06079ef622ff0..772fb22e1be0288e69f1b39f1884a5cb485d5317 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -228,6 +228,9 @@ struct dmar_domain {
        int             flags;          /* flags to find out type of domain */
 
        int             iommu_coherency;/* indicate coherency of iommu access */
+       int             iommu_count;    /* reference count of iommu */
+       spinlock_t      iommu_lock;     /* protect iommu set in domain */
+       u64             max_addr;       /* maximum mapped address */
 };
 
 /* PCI domain-device relationship */
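These three fields carry the rest of the series. A condensed life-cycle sketch, lightly stitched together from the hunks below (collected here only for orientation):

	/* attach path (domain_context_mapping_one): count each unit once */
	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_coherency(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);

	/* teardown (free_dmar_iommu): the last reference frees the domain */
	if (--domain->iommu_count == 0)
		vm_domain_exit(domain);	/* domain_exit() for native domains */

max_addr is maintained by intel_iommu_map_address()/intel_iommu_unmap_address() and is what the new AGAW sufficiency checks compare against.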
@@ -422,6 +425,34 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
        }
 }
 
+static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
+{
+       struct dmar_drhd_unit *drhd = NULL;
+       int i;
+
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored)
+                       continue;
+
+               for (i = 0; i < drhd->devices_cnt; i++)
+                       if (drhd->devices[i]->bus->number == bus &&
+                           drhd->devices[i]->devfn == devfn)
+                               return drhd->iommu;
+
+               if (drhd->include_all)
+                       return drhd->iommu;
+       }
+
+       return NULL;
+}
+
+static void domain_flush_cache(struct dmar_domain *domain,
+                              void *addr, int size)
+{
+       if (!domain->iommu_coherency)
+               clflush_cache_range(addr, size);
+}
+
 /* Gets context entry for a given bus and devfn */
 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
                u8 bus, u8 devfn)
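domain_flush_cache() exists because a multi-IOMMU domain can no longer ask a single unit whether coherent page-table walks are supported: it keys off domain->iommu_coherency, which domain_update_iommu_coherency() recomputes as units are added to or removed from the domain. For contrast, a sketch of the single-unit helper being displaced here (an assumption about __iommu_flush_cache(), which this diff only removes calls to; the ecap_coherent() test is how this era of the driver decided whether to clflush):

	static void __iommu_flush_cache(struct intel_iommu *iommu,
					void *addr, int size)
	{
		if (!ecap_coherent(iommu->ecap))
			clflush_cache_range(addr, size);
	}

device_to_iommu() fills the other gap: with domain_get_iommu() dropped from these paths, callers find a device's unit by walking the DRHD list, falling back to any include_all unit.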
@@ -562,7 +593,6 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
        int level = agaw_to_level(domain->agaw);
        int offset;
        unsigned long flags;
-       struct intel_iommu *iommu = domain_get_iommu(domain);
 
        BUG_ON(!domain->pgd);
 
@@ -586,8 +616,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
                                        flags);
                                return NULL;
                        }
-                       __iommu_flush_cache(iommu, tmp_page,
-                                       PAGE_SIZE);
+                       domain_flush_cache(domain, tmp_page, PAGE_SIZE);
                        dma_set_pte_addr(pte, virt_to_phys(tmp_page));
                        /*
                         * high level table always sets r/w, last level page
@@ -595,7 +624,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
                         */
                        dma_set_pte_readable(pte);
                        dma_set_pte_writable(pte);
-                       __iommu_flush_cache(iommu, pte, sizeof(*pte));
+                       domain_flush_cache(domain, pte, sizeof(*pte));
                }
                parent = phys_to_virt(dma_pte_addr(pte));
                level--;
@@ -632,14 +661,13 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
 static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
 {
        struct dma_pte *pte = NULL;
-       struct intel_iommu *iommu = domain_get_iommu(domain);
 
        /* get last level pte */
        pte = dma_addr_level_pte(domain, addr, 1);
 
        if (pte) {
                dma_clear_pte(pte);
-               __iommu_flush_cache(iommu, pte, sizeof(*pte));
+               domain_flush_cache(domain, pte, sizeof(*pte));
        }
 }
 
@@ -670,7 +698,6 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
        int total = agaw_to_level(domain->agaw);
        int level;
        u64 tmp;
-       struct intel_iommu *iommu = domain_get_iommu(domain);
 
        start &= (((u64)1) << addr_width) - 1;
        end &= (((u64)1) << addr_width) - 1;
@@ -688,8 +715,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                free_pgtable_page(
                                        phys_to_virt(dma_pte_addr(pte)));
                                dma_clear_pte(pte);
-                               __iommu_flush_cache(iommu,
-                                               pte, sizeof(*pte));
+                               domain_flush_cache(domain, pte, sizeof(*pte));
                        }
                        tmp += level_size(level);
                }
@@ -1191,17 +1217,28 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 
 
 static void domain_exit(struct dmar_domain *domain);
+static void vm_domain_exit(struct dmar_domain *domain);
 
 void free_dmar_iommu(struct intel_iommu *iommu)
 {
        struct dmar_domain *domain;
        int i;
+       unsigned long flags;
 
        i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
        for (; i < cap_ndoms(iommu->cap); ) {
                domain = iommu->domains[i];
                clear_bit(i, iommu->domain_ids);
-               domain_exit(domain);
+
+               spin_lock_irqsave(&domain->iommu_lock, flags);
+               if (--domain->iommu_count == 0) {
+                       if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+                               vm_domain_exit(domain);
+                       else
+                               domain_exit(domain);
+               }
+               spin_unlock_irqrestore(&domain->iommu_lock, flags);
+
                i = find_next_bit(iommu->domain_ids,
                        cap_ndoms(iommu->cap), i+1);
        }
@@ -1351,6 +1388,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 
        init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
        spin_lock_init(&domain->mapping_lock);
+       spin_lock_init(&domain->iommu_lock);
 
        domain_reserve_special_ranges(domain);
 
@@ -1377,6 +1415,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
        else
                domain->iommu_coherency = 0;
 
+       domain->iommu_count = 1;
+
        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page();
        if (!domain->pgd)
@@ -1413,12 +1453,22 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                u8 bus, u8 devfn)
 {
        struct context_entry *context;
-       struct intel_iommu *iommu = domain_get_iommu(domain);
        unsigned long flags;
+       struct intel_iommu *iommu;
+       struct dma_pte *pgd;
+       unsigned long num;
+       unsigned long ndomains;
+       int id;
+       int agaw;
 
        pr_debug("Set context mapping for %02x:%02x.%d\n",
                bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        BUG_ON(!domain->pgd);
+
+       iommu = device_to_iommu(bus, devfn);
+       if (!iommu)
+               return -ENODEV;
+
        context = device_to_context_entry(iommu, bus, devfn);
        if (!context)
                return -ENOMEM;
@@ -1428,13 +1478,57 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                return 0;
        }
 
-       context_set_domain_id(context, domain->id);
-       context_set_address_width(context, domain->agaw);
-       context_set_address_root(context, virt_to_phys(domain->pgd));
+       id = domain->id;
+       pgd = domain->pgd;
+
+       if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
+               int found = 0;
+
+               /* find an available domain id for this device in iommu */
+               ndomains = cap_ndoms(iommu->cap);
+               num = find_first_bit(iommu->domain_ids, ndomains);
+               for (; num < ndomains; ) {
+                       if (iommu->domains[num] == domain) {
+                               id = num;
+                               found = 1;
+                               break;
+                       }
+                       num = find_next_bit(iommu->domain_ids,
+                                           cap_ndoms(iommu->cap), num+1);
+               }
+
+               if (found == 0) {
+                       num = find_first_zero_bit(iommu->domain_ids, ndomains);
+                       if (num >= ndomains) {
+                               spin_unlock_irqrestore(&iommu->lock, flags);
+                               printk(KERN_ERR "IOMMU: no free domain ids\n");
+                               return -EFAULT;
+                       }
+
+                       set_bit(num, iommu->domain_ids);
+                       iommu->domains[num] = domain;
+                       id = num;
+               }
+
+               /* Skip top levels of page tables for
+                * iommu which has less agaw than default.
+                */
+               for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+                       pgd = phys_to_virt(dma_pte_addr(pgd));
+                       if (!dma_pte_present(pgd)) {
+                               spin_unlock_irqrestore(&iommu->lock, flags);
+                               return -ENOMEM;
+                       }
+               }
+       }
+
+       context_set_domain_id(context, id);
+       context_set_address_width(context, iommu->agaw);
+       context_set_address_root(context, virt_to_phys(pgd));
        context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
        context_set_fault_enable(context);
        context_set_present(context);
-       __iommu_flush_cache(iommu, context, sizeof(*context));
+       domain_flush_cache(domain, context, sizeof(*context));
 
        /* it's a non-present to present mapping */
        if (iommu->flush.flush_context(iommu, domain->id,
@@ -1445,6 +1539,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
 
        spin_unlock_irqrestore(&iommu->lock, flags);
+
+       spin_lock_irqsave(&domain->iommu_lock, flags);
+       if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
+               domain->iommu_count++;
+               domain_update_iommu_coherency(domain);
+       }
+       spin_unlock_irqrestore(&domain->iommu_lock, flags);
        return 0;
 }
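The level-skipping loop above is the heart of the mixed-width support. A worked example, assuming the encodings used elsewhere in this file (agaw_to_width(a) == 30 + 9*a, agaw_to_level(a) == a + 2):

	/*
	 * domain->agaw == 2: 48-bit address width, 4-level page table
	 * iommu->agaw  == 1: 39-bit address width, walks only 3 levels
	 *
	 * The loop runs once, replacing pgd with the table one level
	 * down, so this unit's context entry points at a 3-level table
	 * covering the low 39 bits:
	 */
	for (agaw = 2; agaw != 1; agaw--)
		pgd = phys_to_virt(dma_pte_addr(pgd));

Note that the context entry is then programmed with iommu->agaw and the per-unit domain id, not the domain-wide values.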
 
@@ -1480,12 +1581,15 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
                        tmp->bus->number, tmp->devfn);
 }
 
-static int domain_context_mapped(struct dmar_domain *domain,
-       struct pci_dev *pdev)
+static int domain_context_mapped(struct pci_dev *pdev)
 {
        int ret;
        struct pci_dev *tmp, *parent;
-       struct intel_iommu *iommu = domain_get_iommu(domain);
+       struct intel_iommu *iommu;
+
+       iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+       if (!iommu)
+               return -ENODEV;
 
        ret = device_context_mapped(iommu,
                pdev->bus->number, pdev->devfn);
@@ -1520,7 +1624,6 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
        struct dma_pte *pte;
        int index;
        int addr_width = agaw_to_width(domain->agaw);
-       struct intel_iommu *iommu = domain_get_iommu(domain);
 
        hpa &= (((u64)1) << addr_width) - 1;
 
@@ -1540,16 +1643,17 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
                BUG_ON(dma_pte_addr(pte));
                dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
                dma_set_pte_prot(pte, prot);
-               __iommu_flush_cache(iommu, pte, sizeof(*pte));
+               domain_flush_cache(domain, pte, sizeof(*pte));
                start_pfn++;
                index++;
        }
        return 0;
 }
 
-static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
+static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
-       struct intel_iommu *iommu = domain_get_iommu(domain);
+       if (!iommu)
+               return;
 
        clear_context_table(iommu, bus, devfn);
        iommu->flush.flush_context(iommu, 0, 0, 0,
@@ -1562,6 +1666,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
 {
        struct device_domain_info *info;
        unsigned long flags;
+       struct intel_iommu *iommu;
 
        spin_lock_irqsave(&device_domain_lock, flags);
        while (!list_empty(&domain->devices)) {
@@ -1573,7 +1678,8 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
                        info->dev->dev.archdata.iommu = NULL;
                spin_unlock_irqrestore(&device_domain_lock, flags);
 
-               detach_domain_for_dev(info->domain, info->bus, info->devfn);
+               iommu = device_to_iommu(info->bus, info->devfn);
+               iommu_detach_dev(iommu, info->bus, info->devfn);
                free_devinfo_mem(info);
 
                spin_lock_irqsave(&device_domain_lock, flags);
@@ -2087,7 +2193,7 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
        }
 
        /* make sure context mapping is ok */
-       if (unlikely(!domain_context_mapped(domain, pdev))) {
+       if (unlikely(!domain_context_mapped(pdev))) {
                ret = domain_context_mapping(domain, pdev);
                if (ret) {
                        printk(KERN_ERR
@@ -2625,7 +2731,214 @@ int __init intel_iommu_init(void)
        return 0;
 }
 
-void intel_iommu_domain_exit(struct dmar_domain *domain)
+static int vm_domain_add_dev_info(struct dmar_domain *domain,
+                                 struct pci_dev *pdev)
+{
+       struct device_domain_info *info;
+       unsigned long flags;
+
+       info = alloc_devinfo_mem();
+       if (!info)
+               return -ENOMEM;
+
+       info->bus = pdev->bus->number;
+       info->devfn = pdev->devfn;
+       info->dev = pdev;
+       info->domain = domain;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       list_add(&info->link, &domain->devices);
+       list_add(&info->global, &device_domain_list);
+       pdev->dev.archdata.iommu = info;
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+       return 0;
+}
+
+static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
+                                         struct pci_dev *pdev)
+{
+       struct device_domain_info *info;
+       struct intel_iommu *iommu;
+       unsigned long flags;
+       int found = 0;
+       struct list_head *entry, *tmp;
+
+       iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+       if (!iommu)
+               return;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       list_for_each_safe(entry, tmp, &domain->devices) {
+               info = list_entry(entry, struct device_domain_info, link);
+               if (info->bus == pdev->bus->number &&
+                   info->devfn == pdev->devfn) {
+                       list_del(&info->link);
+                       list_del(&info->global);
+                       if (info->dev)
+                               info->dev->dev.archdata.iommu = NULL;
+                       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+                       iommu_detach_dev(iommu, info->bus, info->devfn);
+                       free_devinfo_mem(info);
+
+                       spin_lock_irqsave(&device_domain_lock, flags);
+
+                       if (found)
+                               break;
+                       else
+                               continue;
+               }
+
+               /* if there is no other devices under the same iommu
+                * owned by this domain, clear this iommu in iommu_bmp
+                * update iommu count and coherency
+                */
+               if (device_to_iommu(info->bus, info->devfn) == iommu)
+                       found = 1;
+       }
+
+       if (found == 0) {
+               unsigned long tmp_flags;
+               spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
+               clear_bit(iommu->seq_id, &domain->iommu_bmp);
+               domain->iommu_count--;
+               domain_update_iommu_coherency(domain);
+               spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
+       }
+
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
+{
+       struct device_domain_info *info;
+       struct intel_iommu *iommu;
+       unsigned long flags1, flags2;
+
+       spin_lock_irqsave(&device_domain_lock, flags1);
+       while (!list_empty(&domain->devices)) {
+               info = list_entry(domain->devices.next,
+                       struct device_domain_info, link);
+               list_del(&info->link);
+               list_del(&info->global);
+               if (info->dev)
+                       info->dev->dev.archdata.iommu = NULL;
+
+               spin_unlock_irqrestore(&device_domain_lock, flags1);
+
+               iommu = device_to_iommu(info->bus, info->devfn);
+               iommu_detach_dev(iommu, info->bus, info->devfn);
+
+               /* clear this iommu in iommu_bmp, update iommu count
+                * and coherency
+                */
+               spin_lock_irqsave(&domain->iommu_lock, flags2);
+               if (test_and_clear_bit(iommu->seq_id,
+                                      &domain->iommu_bmp)) {
+                       domain->iommu_count--;
+                       domain_update_iommu_coherency(domain);
+               }
+               spin_unlock_irqrestore(&domain->iommu_lock, flags2);
+
+               free_devinfo_mem(info);
+               spin_lock_irqsave(&device_domain_lock, flags1);
+       }
+       spin_unlock_irqrestore(&device_domain_lock, flags1);
+}
+
+/* domain id for virtual machine, it won't be set in context */
+static unsigned long vm_domid;
+
+static int vm_domain_min_agaw(struct dmar_domain *domain)
+{
+       int i;
+       int min_agaw = domain->agaw;
+
+       i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+       for (; i < g_num_of_iommus; ) {
+               if (min_agaw > g_iommus[i]->agaw)
+                       min_agaw = g_iommus[i]->agaw;
+
+               i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
+       }
+
+       return min_agaw;
+}
+
+static struct dmar_domain *iommu_alloc_vm_domain(void)
+{
+       struct dmar_domain *domain;
+
+       domain = alloc_domain_mem();
+       if (!domain)
+               return NULL;
+
+       domain->id = vm_domid++;
+       memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+       domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
+
+       return domain;
+}
+
+static int vm_domain_init(struct dmar_domain *domain, int guest_width)
+{
+       int adjust_width;
+
+       init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+       spin_lock_init(&domain->mapping_lock);
+       spin_lock_init(&domain->iommu_lock);
+
+       domain_reserve_special_ranges(domain);
+
+       /* calculate AGAW */
+       domain->gaw = guest_width;
+       adjust_width = guestwidth_to_adjustwidth(guest_width);
+       domain->agaw = width_to_agaw(adjust_width);
+
+       INIT_LIST_HEAD(&domain->devices);
+
+       domain->iommu_count = 0;
+       domain->iommu_coherency = 0;
+       domain->max_addr = 0;
+
+       /* always allocate the top pgd */
+       domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+       if (!domain->pgd)
+               return -ENOMEM;
+       domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
+       return 0;
+}
+
+static void iommu_free_vm_domain(struct dmar_domain *domain)
+{
+       unsigned long flags;
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+       unsigned long i;
+       unsigned long ndomains;
+
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored)
+                       continue;
+               iommu = drhd->iommu;
+
+               ndomains = cap_ndoms(iommu->cap);
+               i = find_first_bit(iommu->domain_ids, ndomains);
+               for (; i < ndomains; ) {
+                       if (iommu->domains[i] == domain) {
+                               spin_lock_irqsave(&iommu->lock, flags);
+                               clear_bit(i, iommu->domain_ids);
+                               iommu->domains[i] = NULL;
+                               spin_unlock_irqrestore(&iommu->lock, flags);
+                               break;
+                       }
+                       i = find_next_bit(iommu->domain_ids, ndomains, i+1);
+               }
+       }
+}
+
+static void vm_domain_exit(struct dmar_domain *domain)
 {
        u64 end;
 
@@ -2633,6 +2946,9 @@ void intel_iommu_domain_exit(struct dmar_domain *domain)
        if (!domain)
                return;
 
+       vm_domain_remove_all_dev_info(domain);
+       /* destroy iovas */
+       put_iova_domain(&domain->iovad);
        end = DOMAIN_MAX_ADDR(domain->gaw);
        end = end & (~VTD_PAGE_MASK);
 
@@ -2642,76 +2958,134 @@ void intel_iommu_domain_exit(struct dmar_domain *domain)
        /* free page tables */
        dma_pte_free_pagetable(domain, 0, end);
 
-       iommu_free_domain(domain);
+       iommu_free_vm_domain(domain);
        free_domain_mem(domain);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_domain_exit);
 
-struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev)
+struct dmar_domain *intel_iommu_alloc_domain(void)
 {
-       struct dmar_drhd_unit *drhd;
        struct dmar_domain *domain;
-       struct intel_iommu *iommu;
 
-       drhd = dmar_find_matched_drhd_unit(pdev);
-       if (!drhd) {
-               printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n");
-               return NULL;
-       }
-
-       iommu = drhd->iommu;
-       if (!iommu) {
-               printk(KERN_ERR
-                       "intel_iommu_domain_alloc: iommu == NULL\n");
-               return NULL;
-       }
-       domain = iommu_alloc_domain(iommu);
+       domain = iommu_alloc_vm_domain();
        if (!domain) {
                printk(KERN_ERR
                        "intel_iommu_domain_alloc: domain == NULL\n");
                return NULL;
        }
-       if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+       if (vm_domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                printk(KERN_ERR
                        "intel_iommu_domain_alloc: domain_init() failed\n");
-               intel_iommu_domain_exit(domain);
+               vm_domain_exit(domain);
                return NULL;
        }
+
        return domain;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc);
+EXPORT_SYMBOL_GPL(intel_iommu_alloc_domain);
 
-int intel_iommu_context_mapping(
-       struct dmar_domain *domain, struct pci_dev *pdev)
+void intel_iommu_free_domain(struct dmar_domain *domain)
 {
-       int rc;
-       rc = domain_context_mapping(domain, pdev);
-       return rc;
+       vm_domain_exit(domain);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_context_mapping);
+EXPORT_SYMBOL_GPL(intel_iommu_free_domain);
 
-int intel_iommu_page_mapping(
-       struct dmar_domain *domain, dma_addr_t iova,
-       u64 hpa, size_t size, int prot)
+int intel_iommu_attach_device(struct dmar_domain *domain,
+                             struct pci_dev *pdev)
 {
-       int rc;
-       rc = domain_page_mapping(domain, iova, hpa, size, prot);
-       return rc;
+       struct intel_iommu *iommu;
+       int addr_width;
+       u64 end;
+       int ret;
+
+       /* normally pdev is not mapped */
+       if (unlikely(domain_context_mapped(pdev))) {
+               struct dmar_domain *old_domain;
+
+               old_domain = find_domain(pdev);
+               if (old_domain) {
+                       if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+                               vm_domain_remove_one_dev_info(old_domain, pdev);
+                       else
+                               domain_remove_dev_info(old_domain);
+               }
+       }
+
+       iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+       if (!iommu)
+               return -ENODEV;
+
+       /* check if this iommu agaw is sufficient for max mapped address */
+       addr_width = agaw_to_width(iommu->agaw);
+       end = DOMAIN_MAX_ADDR(addr_width);
+       end = end & VTD_PAGE_MASK;
+       if (end < domain->max_addr) {
+               printk(KERN_ERR "%s: iommu agaw (%d) is not "
+                      "sufficient for the mapped address (%llx)\n",
+                      __func__, iommu->agaw, domain->max_addr);
+               return -EFAULT;
+       }
+
+       ret = domain_context_mapping(domain, pdev);
+       if (ret)
+               return ret;
+
+       ret = vm_domain_add_dev_info(domain, pdev);
+       return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_page_mapping);
+EXPORT_SYMBOL_GPL(intel_iommu_attach_device);
 
-void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
+void intel_iommu_detach_device(struct dmar_domain *domain,
+                              struct pci_dev *pdev)
 {
-       detach_domain_for_dev(domain, bus, devfn);
+       vm_domain_remove_one_dev_info(domain, pdev);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_detach_dev);
+EXPORT_SYMBOL_GPL(intel_iommu_detach_device);
 
-struct dmar_domain *
-intel_iommu_find_domain(struct pci_dev *pdev)
+int intel_iommu_map_address(struct dmar_domain *domain, dma_addr_t iova,
+                           u64 hpa, size_t size, int prot)
 {
-       return find_domain(pdev);
+       u64 max_addr;
+       int addr_width;
+       int ret;
+
+       max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
+       if (domain->max_addr < max_addr) {
+               int min_agaw;
+               u64 end;
+
+               /* check if minimum agaw is sufficient for mapped address */
+               min_agaw = vm_domain_min_agaw(domain);
+               addr_width = agaw_to_width(min_agaw);
+               end = DOMAIN_MAX_ADDR(addr_width);
+               end = end & VTD_PAGE_MASK;
+               if (end < max_addr) {
+                       printk(KERN_ERR "%s: iommu agaw (%d) is not "
+                              "sufficient for the mapped address (%llx)\n",
+                              __func__, min_agaw, max_addr);
+                       return -EFAULT;
+               }
+               domain->max_addr = max_addr;
+       }
+
+       ret = domain_page_mapping(domain, iova, hpa, size, prot);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_map_address);
+
+void intel_iommu_unmap_address(struct dmar_domain *domain,
+                              dma_addr_t iova, size_t size)
+{
+       dma_addr_t base;
+
+       /* The address might not be aligned */
+       base = iova & VTD_PAGE_MASK;
+       size = VTD_PAGE_ALIGN(size);
+       dma_pte_clear_range(domain, base, base + size);
+
+       if (domain->max_addr == base + size)
+               domain->max_addr = base;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_find_domain);
+EXPORT_SYMBOL_GPL(intel_iommu_unmap_address);
 
 int intel_iommu_found(void)
 {
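The map-time check in intel_iommu_map_address() mirrors the attach-time one, but runs against the narrowest unit in the domain as found by vm_domain_min_agaw(). Rough arithmetic, assuming agaw_to_width(a) == 30 + 9*a and DOMAIN_MAX_ADDR(gaw) == ((((u64)1) << gaw) - 1) as defined in this file:

	/*
	 * min_agaw == 1                ->  addr_width == 39
	 * DOMAIN_MAX_ADDR(39)          ==  0x7fffffffff
	 * end = that & VTD_PAGE_MASK   ==  0x7ffffff000
	 *
	 * so a mapping whose page-aligned end exceeds 0x7ffffff000
	 * (just under 512 GB) fails with -EFAULT instead of silently
	 * wrapping in a 3-level table.
	 */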
@@ -2719,17 +3093,15 @@ int intel_iommu_found(void)
 }
 EXPORT_SYMBOL_GPL(intel_iommu_found);
 
-u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
+u64 intel_iommu_iova_to_phys(struct dmar_domain *domain, u64 iova)
 {
        struct dma_pte *pte;
-       u64 pfn;
+       u64 phys = 0;
 
-       pfn = 0;
        pte = addr_to_dma_pte(domain, iova);
-
        if (pte)
-               pfn = dma_pte_addr(pte);
+               phys = dma_pte_addr(pte);
 
-       return pfn >> VTD_PAGE_SHIFT;
+       return phys;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
+EXPORT_SYMBOL_GPL(intel_iommu_iova_to_phys);
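Taken together, the reworked exports give an outside consumer (the vm_domain naming points at KVM device assignment) a flow along these lines. A sketch only: error handling is elided and DMA_PTE_READ/DMA_PTE_WRITE are assumed from intel-iommu.h:

	struct dmar_domain *dom = intel_iommu_alloc_domain();

	intel_iommu_attach_device(dom, pdev);
	intel_iommu_map_address(dom, iova, hpa, size,
				DMA_PTE_READ | DMA_PTE_WRITE);
	phys = intel_iommu_iova_to_phys(dom, iova);

	intel_iommu_unmap_address(dom, iova, size);
	intel_iommu_detach_device(dom, pdev);
	intel_iommu_free_domain(dom);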