intel-iommu: fix return value of iommu_unmap() API
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c621c98c99da00886931b3aed1b8364c7de6b958..237ef52c8c7684e15077568b25f3d7637b607f2d 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -404,6 +404,9 @@ static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 
+int intel_iommu_gfx_mapped;
+EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
+
 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
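
The new intel_iommu_gfx_mapped flag lets graphics drivers ask whether their device is actually being translated by the IOMMU. A minimal consumer-side sketch, assuming a hypothetical wrapper in some gfx driver (only the extern mirrors this patch; the helper name is illustrative):

    #include <linux/types.h>

    /* Exported by intel-iommu.c in the hunk above. */
    extern int intel_iommu_gfx_mapped;

    /* Hypothetical helper: true once init_no_remapping_devices() has
     * found a gfx-only DRHD unit while dmar_map_gfx is still enabled. */
    static inline bool gfx_behind_iommu(void)
    {
            return intel_iommu_gfx_mapped != 0;
    }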
@@ -816,13 +819,14 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
 }
 
 /* clear last level pte, a tlb flush should be followed */
-static void dma_pte_clear_range(struct dmar_domain *domain,
+static int dma_pte_clear_range(struct dmar_domain *domain,
                                unsigned long start_pfn,
                                unsigned long last_pfn)
 {
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        unsigned int large_page = 1;
        struct dma_pte *first_pte, *pte;
+       int order;
 
        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -846,6 +850,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
                                   (void *)pte - (void *)first_pte);
 
        } while (start_pfn && start_pfn <= last_pfn);
+
+       order = (large_page - 1) * 9;
+       return order;
 }
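
The value placed in order comes from the page-table level of the last PTE cleared: level 1 maps a 4KiB page, and each level above it widens coverage by 9 bits (512 entries per table), so (large_page - 1) * 9 expresses the unmapped size as an order of 4KiB pages. A standalone sketch of that arithmetic (the helper name and the 4KiB VT-d page size are assumptions for illustration):

    #include <stdio.h>

    #define VTD_PAGE_SHIFT 12   /* VT-d base pages are 4KiB */

    /* Hypothetical mirror of the (large_page - 1) * 9 computation. */
    static unsigned long level_to_bytes(int large_page)
    {
            int order = (large_page - 1) * 9;
            return (1UL << order) << VTD_PAGE_SHIFT;
    }

    int main(void)
    {
            /* level 1 -> 4KiB, level 2 -> 2MiB, level 3 -> 1GiB */
            for (int level = 1; level <= 3; level++)
                    printf("level %d: %lu bytes\n", level, level_to_bytes(level));
            return 0;
    }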
 
 /* free page table pages. last level pte should already be cleared */
@@ -3226,9 +3233,6 @@ static void __init init_no_remapping_devices(void)
                }
        }
 
-       if (dmar_map_gfx)
-               return;
-
        for_each_drhd_unit(drhd) {
                int i;
                if (drhd->ignored || drhd->include_all)
@@ -3236,18 +3240,23 @@ static void __init init_no_remapping_devices(void)
 
                for (i = 0; i < drhd->devices_cnt; i++)
                        if (drhd->devices[i] &&
-                               !IS_GFX_DEVICE(drhd->devices[i]))
+                           !IS_GFX_DEVICE(drhd->devices[i]))
                                break;
 
                if (i < drhd->devices_cnt)
                        continue;
 
-               /* bypass IOMMU if it is just for gfx devices */
-               drhd->ignored = 1;
-               for (i = 0; i < drhd->devices_cnt; i++) {
-                       if (!drhd->devices[i])
-                               continue;
-                       drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+               /* This IOMMU has *only* gfx devices. Either bypass it or
+                  set the gfx_mapped flag, as appropriate */
+               if (dmar_map_gfx) {
+                       intel_iommu_gfx_mapped = 1;
+               } else {
+                       drhd->ignored = 1;
+                       for (i = 0; i < drhd->devices_cnt; i++) {
+                               if (!drhd->devices[i])
+                                       continue;
+                               drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+                       }
                }
        }
 }
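
The early return on dmar_map_gfx is gone, so the gfx-only scan now runs unconditionally and its outcome is recorded either way. In outline (a paraphrase, not literal kernel code):

    /* Per-DRHD decision after this change (paraphrase):
     *
     *   if the unit contains only graphics devices:
     *           if (dmar_map_gfx)
     *                   intel_iommu_gfx_mapped = 1;   -- keep translating gfx
     *           else
     *                   drhd->ignored = 1;            -- bypass this unit
     */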
@@ -3568,6 +3577,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
                        found = 1;
        }
 
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+
        if (found == 0) {
                unsigned long tmp_flags;
                spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
@@ -3584,8 +3595,6 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
                        spin_unlock_irqrestore(&iommu->lock, tmp_flags);
                }
        }
-
-       spin_unlock_irqrestore(&device_domain_lock, flags);
 }
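
Hoisting the unlock means device_domain_lock is released before domain->iommu_lock and iommu->lock are acquired, so the outer lock is never held across the nested ones. The resulting locking shape, in outline (paraphrase):

    /* After this change (paraphrase):
     *
     *   spin_lock_irqsave(&device_domain_lock, flags);
     *   ... unlink this device's info, note whether others remain ...
     *   spin_unlock_irqrestore(&device_domain_lock, flags);
     *
     *   if (found == 0) {
     *           spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
     *           ... detach the domain from this iommu ...
     *           spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
     *   }
     */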
 
 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
@@ -3864,14 +3873,15 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
 {
        struct dmar_domain *dmar_domain = domain->priv;
        size_t size = PAGE_SIZE << gfp_order;
+       int order;
 
-       dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
+       order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
                            (iova + size - 1) >> VTD_PAGE_SHIFT);
 
        if (dmar_domain->max_addr == iova + size)
                dmar_domain->max_addr = iova;
 
-       return gfp_order;
+       return order;
 }
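
Returning the order computed by dma_pte_clear_range() tells the caller how much address space was actually torn down, which can exceed the requested order when the range was backed by a superpage PTE. A hedged caller-side sketch (the loop and unmap_range() are illustrative assumptions, not the generic IOMMU core of this tree):

    #include <linux/iommu.h>

    /* Hypothetical caller: tear down a range one PTE at a time,
     * advancing by whatever each call actually cleared. */
    static void unmap_range(struct iommu_domain *dom, unsigned long iova,
                            size_t len)
    {
            while (len) {
                    int order = iommu_unmap(dom, iova, 0); /* ask for 4KiB */
                    size_t step = PAGE_SIZE << order;      /* may be 2MiB+ */

                    iova += step;
                    len -= (step < len) ? step : len;
            }
    }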
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3950,7 +3960,11 @@ static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
        if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
                printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
                dmar_map_gfx = 0;
-       }
+       } else if (dmar_map_gfx) {
+               /* we have to ensure the gfx device is idle before we flush */
+               printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
+               intel_iommu_strict = 1;
+       }
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
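
With intel_iommu_strict set, each unmap flushes the IOTLB synchronously instead of deferring to a batched flush, which is what the comment above is guarding: the flush must not race with a still-active gfx device. The trade-off in outline (paraphrase):

    /* strict mode (forced here on Ironlake 0x0040/0x0044):
     *   unmap page -> flush IOTLB immediately -> return
     *
     * default batched mode:
     *   unmap page -> queue flush -> ... -> flush a batch later
     *
     * Batching is faster, but leaves a window in which the gfx
     * device could still use a stale translation.
     */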