x86/amd-iommu: Make alloc_new_range aware of multiple IOMMUs
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index c5102ebdcbd99f0d75d804ae83a6d0bb2ca90566..687f617b95d7684b04abfca545a63a05cea877c4 100644
@@ -788,11 +788,11 @@ static u64 *fetch_pte(struct protection_domain *domain,
  * aperture in case of dma_ops domain allocation or address allocation
  * failure.
  */
-static int alloc_new_range(struct amd_iommu *iommu,
-                          struct dma_ops_domain *dma_dom,
+static int alloc_new_range(struct dma_ops_domain *dma_dom,
                           bool populate, gfp_t gfp)
 {
        int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
+       struct amd_iommu *iommu;
        int i;
 
 #ifdef CONFIG_IOMMU_STRESS
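
The iommu parameter dropped above becomes a local that the next hunk walks with for_each_iommu(). A minimal sketch of that macro, assuming this tree defines it in amd_iommu_types.h as the usual list walk over all detected IOMMUs:

    /* Assumed definition; visits every AMD IOMMU found at init time. */
    #define for_each_iommu(iommu) \
            list_for_each_entry((iommu), &amd_iommu_list, list)
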
@@ -832,14 +832,17 @@ static int alloc_new_range(struct amd_iommu *iommu,
        dma_dom->aperture_size += APERTURE_RANGE_SIZE;
 
        /* Initialize the exclusion range if necessary */
-       if (iommu->exclusion_start &&
-           iommu->exclusion_start >= dma_dom->aperture[index]->offset &&
-           iommu->exclusion_start < dma_dom->aperture_size) {
-               unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
-               int pages = iommu_num_pages(iommu->exclusion_start,
-                                           iommu->exclusion_length,
-                                           PAGE_SIZE);
-               dma_ops_reserve_addresses(dma_dom, startpage, pages);
+       for_each_iommu(iommu) {
+               if (iommu->exclusion_start &&
+                   iommu->exclusion_start >= dma_dom->aperture[index]->offset
+                   && iommu->exclusion_start < dma_dom->aperture_size) {
+                       unsigned long startpage;
+                       int pages = iommu_num_pages(iommu->exclusion_start,
+                                                   iommu->exclusion_length,
+                                                   PAGE_SIZE);
+                       startpage = iommu->exclusion_start >> PAGE_SHIFT;
+                       dma_ops_reserve_addresses(dma_dom, startpage, pages);
+               }
        }
 
        /*
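A worked example of the reservation math above: iommu_num_pages() counts how many PAGE_SIZE pages are needed to cover [exclusion_start, exclusion_start + exclusion_length). A standalone userspace sketch, with the helper mirroring include/linux/iommu-helper.h and hypothetical sample values:

    #include <stdio.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_SHIFT 12

    /* Mirrors the kernel helper: pages covering [addr, addr + len). */
    static unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
                                         unsigned long io_page_size)
    {
            unsigned long size = (addr & (io_page_size - 1)) + len;

            return (size + io_page_size - 1) / io_page_size; /* DIV_ROUND_UP */
    }

    int main(void)
    {
            unsigned long exclusion_start  = 0xf800UL; /* hypothetical */
            unsigned long exclusion_length = 0x2000UL; /* hypothetical */

            /* 0xf800 >> 12 = page 15; (0x800 + 0x2000) rounds up to 3 pages */
            printf("startpage = %lu, pages = %lu\n",
                   exclusion_start >> PAGE_SHIFT,
                   iommu_num_pages(exclusion_start, exclusion_length, PAGE_SIZE));
            return 0;
    }
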
@@ -1143,7 +1146,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
 
        add_domain_to_list(&dma_dom->domain);
 
-       if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL))
+       if (alloc_new_range(dma_dom, true, GFP_KERNEL))
                goto free_dma_dom;
 
        /*
@@ -1585,8 +1588,7 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
  * This is the generic map function. It maps one 4kb page at paddr to
  * the given address in the DMA address space for the domain.
  */
-static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
-                                    struct dma_ops_domain *dom,
+static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
                                     unsigned long address,
                                     phys_addr_t paddr,
                                     int direction)
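
For context, the body of dma_ops_domain_map() needs no IOMMU pointer either: it looks up the PTE for the aperture address via dma_ops_get_pte() and encodes paddr plus permission bits chosen by direction. A rough sketch of the existing body, assuming it matches this tree (not part of the patch):

    /* Sketch of the unchanged body; assumed to match this tree. */
    pte = dma_ops_get_pte(dom, address);
    if (!pte)
            return DMA_ERROR_CODE;

    __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

    if (direction == DMA_TO_DEVICE)
            __pte |= IOMMU_PTE_IR;
    else if (direction == DMA_FROM_DEVICE)
            __pte |= IOMMU_PTE_IW;
    else if (direction == DMA_BIDIRECTIONAL)
            __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;

    *pte = __pte;
    return (dma_addr_t)address;
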
@@ -1620,8 +1622,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
 /*
  * The generic unmapping function for one page in the DMA address space.
  */
-static void dma_ops_domain_unmap(struct amd_iommu *iommu,
-                                struct dma_ops_domain *dom,
+static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
                                 unsigned long address)
 {
        struct aperture_range *aperture;
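
The unmap side likewise only touches domain state: it resolves the aperture page holding the PTE and clears it. Hedged sketch of the existing body, with the aperture index macros assumed from this tree:

    /* Sketch of the unchanged body; macro names assumed from this tree. */
    if (address >= dom->aperture_size)
            return;

    aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
    if (!aperture)
            return;

    pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
    if (!pte)
            return;

    pte += PM_LEVEL_INDEX(0, address);
    *pte = 0ULL;
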
@@ -1688,7 +1689,7 @@ retry:
                 */
                dma_dom->next_address = dma_dom->aperture_size;
 
-               if (alloc_new_range(iommu, dma_dom, false, GFP_ATOMIC))
+               if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
                        goto out;
 
                /*
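
This hunk sits inside the retry loop of __map_single(): when the address allocator runs dry, the aperture is grown with alloc_new_range() (now without an IOMMU argument) and the allocation is retried. The surrounding shape, condensed and assumed from this tree:

    retry:
            address = dma_ops_alloc_addresses(dev, dma_dom, pages,
                                              align_mask, dma_mask);
            if (unlikely(address == DMA_ERROR_CODE)) {
                    /* let the allocator scan only the new range first */
                    dma_dom->next_address = dma_dom->aperture_size;

                    if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
                            goto out;  /* could not enlarge the aperture */

                    goto retry;        /* aperture grown, try again */
            }
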
@@ -1700,7 +1701,7 @@ retry:
 
        start = address;
        for (i = 0; i < pages; ++i) {
-               ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
+               ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
                if (ret == DMA_ERROR_CODE)
                        goto out_unmap;
 
@@ -1724,7 +1725,7 @@ out_unmap:
 
        for (--i; i >= 0; --i) {
                start -= PAGE_SIZE;
-               dma_ops_domain_unmap(iommu, dma_dom, start);
+               dma_ops_domain_unmap(dma_dom, start);
        }
 
        dma_ops_free_addresses(dma_dom, address, pages);
@@ -1754,7 +1755,7 @@ static void __unmap_single(struct amd_iommu *iommu,
        start = dma_addr;
 
        for (i = 0; i < pages; ++i) {
-               dma_ops_domain_unmap(iommu, dma_dom, start);
+               dma_ops_domain_unmap(dma_dom, start);
                start += PAGE_SIZE;
        }
 