/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;
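+
+/* set by quirk_iommu_rwbf() at the bottom of this file: forces
+ * write-buffer flushing on chipsets that need it but do not
+ * advertise the RWBF capability */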
+static int rwbf_quirk;
+
/*
* 0: Present
* 1-11: Reserved
* 12-63: Context Ptr (12 - (haw-1))
* 64-127: Reserved
*/
/* devices under the same p2p bridge are owned by one domain */
-#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 < 0)
+#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
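+/* note: the old value (1 < 0) evaluated to 0, so the flag could
+ * never actually be set */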
/* domain represents a virtual machine; more than one device
* across iommus may be owned by one domain, e.g. a kvm guest.
int flags; /* flags to find out type of domain */
int iommu_coherency;/* indicate coherency of iommu access */
+ int iommu_snooping; /* indicate snooping control feature */
int iommu_count; /* reference count of iommu */
spinlock_t iommu_lock; /* protect iommu set in domain */
u64 max_addr; /* maximum mapped address */
static void domain_remove_dev_info(struct dmar_domain *domain);
-int dmar_disabled;
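+/*
+ * With CONFIG_DMAR_DEFAULT_ON, DMA remapping is enabled by default;
+ * otherwise it stays off until requested.  Either default can be
+ * overridden on the kernel command line with intel_iommu=on or
+ * intel_iommu=off (see intel_iommu_setup() below).
+ */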
+#ifdef CONFIG_DMAR_DEFAULT_ON
+int dmar_disabled = 0;
+#else
+int dmar_disabled = 1;
+#endif /* CONFIG_DMAR_DEFAULT_ON */
+
static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
+static struct iommu_ops intel_iommu_ops;
+
static int __init intel_iommu_setup(char *str)
{
if (!str)
return -EINVAL;
while (*str) {
- if (!strncmp(str, "off", 3)) {
+ if (!strncmp(str, "on", 2)) {
+ dmar_disabled = 0;
+ printk(KERN_INFO "Intel-IOMMU: enabled\n");
+ } else if (!strncmp(str, "off", 3)) {
dmar_disabled = 1;
- printk(KERN_INFO"Intel-IOMMU: disabled\n");
+ printk(KERN_INFO "Intel-IOMMU: disabled\n");
} else if (!strncmp(str, "igfx_off", 8)) {
dmar_map_gfx = 0;
printk(KERN_INFO
return g_iommus[iommu_id];
}
-/* "Coherency" capability may be different across iommus */
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
int i;
}
}
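+
+/* snooping control is usable only if every iommu spanned by the
+ * domain advertises it (ecap SC bit); a single unit without it
+ * turns snooping off for the whole domain */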
+static void domain_update_iommu_snooping(struct dmar_domain *domain)
+{
+ int i;
+
+ domain->iommu_snooping = 1;
+
+ i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+ for (; i < g_num_of_iommus; ) {
+ if (!ecap_sc_support(g_iommus[i]->ecap)) {
+ domain->iommu_snooping = 0;
+ break;
+ }
+ i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
+ }
+}
+
+/* Some capabilities may be different across iommus */
+static void domain_update_iommu_cap(struct dmar_domain *domain)
+{
+ domain_update_iommu_coherency(domain);
+ domain_update_iommu_snooping(domain);
+}
+
static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
{
struct dmar_drhd_unit *drhd = NULL;
continue;
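+ /* devices[] slots may be NULL when a device listed in the
+ * DRHD scope was not found during enumeration */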
for (i = 0; i < drhd->devices_cnt; i++)
- if (drhd->devices[i]->bus->number == bus &&
+ if (drhd->devices[i] &&
+ drhd->devices[i]->bus->number == bus &&
drhd->devices[i]->devfn == devfn)
return drhd->iommu;
u32 val;
unsigned long flag;
- if (!cap_rwbf(iommu->cap))
+ if (!rwbf_quirk && !cap_rwbf(iommu->cap))
return;
val = iommu->gcmd | DMA_GCMD_WBF;
else
domain->iommu_coherency = 0;
+ if (ecap_sc_support(iommu->ecap))
+ domain->iommu_snooping = 1;
+ else
+ domain->iommu_snooping = 0;
+
domain->iommu_count = 1;
/* always allocate the top pgd */
spin_lock_irqsave(&domain->iommu_lock, flags);
if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
domain->iommu_count++;
- domain_update_iommu_coherency(domain);
+ domain_update_iommu_cap(domain);
}
spin_unlock_irqrestore(&domain->iommu_lock, flags);
return 0;
init_timer(&unmap_timer);
force_iommu = 1;
dma_ops = &intel_dma_ops;
+
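+ /* expose VT-d through the generic IOMMU API, for users such as
+ * KVM device assignment */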
+ register_iommu(&intel_iommu_ops);
+
return 0;
}
spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
clear_bit(iommu->seq_id, &domain->iommu_bmp);
domain->iommu_count--;
- domain_update_iommu_coherency(domain);
+ domain_update_iommu_cap(domain);
spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
}
iommu_detach_dev(iommu, info->bus, info->devfn);
/* clear this iommu in iommu_bmp, update iommu count
- * and coherency
+ * and capabilities
*/
spin_lock_irqsave(&domain->iommu_lock, flags2);
if (test_and_clear_bit(iommu->seq_id,
&domain->iommu_bmp)) {
domain->iommu_count--;
- domain_update_iommu_coherency(domain);
+ domain_update_iommu_cap(domain);
}
spin_unlock_irqrestore(&domain->iommu_lock, flags2);
vm_domain_exit(dmar_domain);
}
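+
+/* attach a device to an API domain, first detaching it from the
+ * domain it currently belongs to (its boot-time DMA-API domain or
+ * a previous VM domain) */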
-int intel_iommu_attach_device(struct dmar_domain *domain,
- struct pci_dev *pdev)
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
{
+ struct dmar_domain *dmar_domain = domain->priv;
+ struct pci_dev *pdev = to_pci_dev(dev);
struct intel_iommu *iommu;
int addr_width;
u64 end;
old_domain = find_domain(pdev);
if (old_domain) {
- if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+ if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
vm_domain_remove_one_dev_info(old_domain, pdev);
else
domain_remove_dev_info(old_domain);
addr_width = agaw_to_width(iommu->agaw);
end = DOMAIN_MAX_ADDR(addr_width);
end = end & VTD_PAGE_MASK;
- if (end < domain->max_addr) {
+ if (end < dmar_domain->max_addr) {
printk(KERN_ERR "%s: iommu agaw (%d) is not "
"sufficient for the mapped address (%llx)\n",
- __func__, iommu->agaw, domain->max_addr);
+ __func__, iommu->agaw, dmar_domain->max_addr);
return -EFAULT;
}
- ret = domain_context_mapping(domain, pdev);
+ ret = domain_context_mapping(dmar_domain, pdev);
if (ret)
return ret;
- ret = vm_domain_add_dev_info(domain, pdev);
+ ret = vm_domain_add_dev_info(dmar_domain, pdev);
return ret;
}
-EXPORT_SYMBOL_GPL(intel_iommu_attach_device);
-void intel_iommu_detach_device(struct dmar_domain *domain,
- struct pci_dev *pdev)
+static void intel_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
{
- vm_domain_remove_one_dev_info(domain, pdev);
+ struct dmar_domain *dmar_domain = domain->priv;
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ vm_domain_remove_one_dev_info(dmar_domain, pdev);
}
-EXPORT_SYMBOL_GPL(intel_iommu_detach_device);
-int intel_iommu_map_address(struct dmar_domain *domain, dma_addr_t iova,
- u64 hpa, size_t size, int prot)
+static int intel_iommu_map_range(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t hpa,
+ size_t size, int iommu_prot)
{
+ struct dmar_domain *dmar_domain = domain->priv;
u64 max_addr;
int addr_width;
+ int prot = 0;
int ret;
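+
+ /* translate the generic IOMMU_READ/IOMMU_WRITE flags into VT-d
+ * page-table permission bits */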
+ if (iommu_prot & IOMMU_READ)
+ prot |= DMA_PTE_READ;
+ if (iommu_prot & IOMMU_WRITE)
+ prot |= DMA_PTE_WRITE;
+
max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
- if (domain->max_addr < max_addr) {
+ if (dmar_domain->max_addr < max_addr) {
int min_agaw;
u64 end;
/* check if minimum agaw is sufficient for mapped address */
- min_agaw = vm_domain_min_agaw(domain);
+ min_agaw = vm_domain_min_agaw(dmar_domain);
addr_width = agaw_to_width(min_agaw);
end = DOMAIN_MAX_ADDR(addr_width);
end = end & VTD_PAGE_MASK;
__func__, min_agaw, max_addr);
return -EFAULT;
}
- domain->max_addr = max_addr;
+ dmar_domain->max_addr = max_addr;
}
- ret = domain_page_mapping(domain, iova, hpa, size, prot);
+ ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
return ret;
}
-EXPORT_SYMBOL_GPL(intel_iommu_map_address);
-void intel_iommu_unmap_address(struct dmar_domain *domain,
- dma_addr_t iova, size_t size)
+static void intel_iommu_unmap_range(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
{
+ struct dmar_domain *dmar_domain = domain->priv;
dma_addr_t base;
/* The address might not be aligned */
base = iova & VTD_PAGE_MASK;
size = VTD_PAGE_ALIGN(size);
- dma_pte_clear_range(domain, base, base + size);
+ dma_pte_clear_range(dmar_domain, base, base + size);
- if (domain->max_addr == base + size)
- domain->max_addr = base;
+ if (dmar_domain->max_addr == base + size)
+ dmar_domain->max_addr = base;
}
-EXPORT_SYMBOL_GPL(intel_iommu_unmap_address);
-int intel_iommu_found(void)
-{
- return g_num_of_iommus;
-}
-EXPORT_SYMBOL_GPL(intel_iommu_found);
-
-u64 intel_iommu_iova_to_phys(struct dmar_domain *domain, u64 iova)
+static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
+ unsigned long iova)
{
+ struct dmar_domain *dmar_domain = domain->priv;
struct dma_pte *pte;
u64 phys = 0;
- pte = addr_to_dma_pte(domain, iova);
+ pte = addr_to_dma_pte(dmar_domain, iova);
if (pte)
phys = dma_pte_addr(pte);
return phys;
}
-EXPORT_SYMBOL_GPL(intel_iommu_iova_to_phys);
+
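+/* report the snooping capability through the generic cap bit so
+ * callers (e.g. KVM device assignment) can tell whether DMA is
+ * always coherent */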
+static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
+ unsigned long cap)
+{
+ struct dmar_domain *dmar_domain = domain->priv;
+
+ if (cap == IOMMU_CAP_CACHE_COHERENCY)
+ return dmar_domain->iommu_snooping;
+
+ return 0;
+}
+
+static struct iommu_ops intel_iommu_ops = {
+ .domain_init = intel_iommu_domain_init,
+ .domain_destroy = intel_iommu_domain_destroy,
+ .attach_dev = intel_iommu_attach_device,
+ .detach_dev = intel_iommu_detach_device,
+ .map = intel_iommu_map_range,
+ .unmap = intel_iommu_unmap_range,
+ .iova_to_phys = intel_iommu_iova_to_phys,
+ .domain_has_cap = intel_iommu_domain_has_cap,
+};
+
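+/*
+ * Minimal usage sketch of the generic API these ops back
+ * (hypothetical caller, not part of this patch; pdev, iova and
+ * paddr are assumed to be set up elsewhere):
+ *
+ *	struct iommu_domain *dom = iommu_domain_alloc();
+ *
+ *	if (dom && !iommu_attach_device(dom, &pdev->dev)) {
+ *		iommu_map_range(dom, iova, paddr, PAGE_SIZE,
+ *				IOMMU_READ | IOMMU_WRITE);
+ *		...
+ *		iommu_unmap_range(dom, iova, PAGE_SIZE);
+ *		iommu_detach_device(dom, &pdev->dev);
+ *	}
+ *	iommu_domain_free(dom);
+ */
+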
+static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+{
+ /*
+ * Mobile 4 Series Chipset neglects to set RWBF capability,
+ * but needs it:
+ */
+ printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+ rwbf_quirk = 1;
+}
+
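+/* device 0x2a40 is the Mobile 4 Series memory controller hub */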
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);