tile: uninline dma_set_mask
author	Christoph Hellwig <hch@lst.de>
	Wed, 20 Jan 2016 23:02:02 +0000 (15:02 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 21 Jan 2016 01:09:18 +0000 (17:09 -0800)
We'll soon merge <asm-generic/dma-mapping-common.h> into
<linux/dma-mapping.h> and the reference to dma_capable in the tile
dma_set_mask would create a circular dependency.

Fix this by moving the implementation out of line.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
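
The fix itself follows the standard pattern for breaking a header cycle: the header keeps only a declaration, and the definition moves to a translation unit that is free to include whatever it needs. A minimal out-of-tree sketch of the pattern, using hypothetical names (foo_dev, foo_set_mode, foo_supported; nothing here is kernel API):

	#include <errno.h>

	/* foo.h (shown inline): declaration only, so other headers can
	 * include it without pulling in the helper definitions. */
	struct foo_dev { unsigned long long mode; };
	int foo_set_mode(struct foo_dev *dev, unsigned long long mode);

	/* foo.c: the out-of-line definition may use any helper it wants
	 * without creating an include cycle. */
	static int foo_supported(struct foo_dev *dev, unsigned long long mode)
	{
		return mode != 0;	/* stand-in for a real capability check */
	}

	int foo_set_mode(struct foo_dev *dev, unsigned long long mode)
	{
		if (!dev || !foo_supported(dev, mode))
			return -EIO;	/* same error convention as dma_set_mask() */
		dev->mode = mode;
		return 0;
	}
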
arch/tile/include/asm/dma-mapping.h
arch/tile/kernel/pci-dma.c

diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 96ac6cce4a32c03ead94166ac1190b91ac5b032d..c342736e3f1f846ef66721dad05f810b93de8a9f 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -76,34 +76,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-       struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-       /*
-        * For PCI devices with 64-bit DMA addressing capability, promote
-        * the dma_ops to hybrid, with the consistent memory DMA space limited
-        * to 32-bit. For 32-bit capable devices, limit the streaming DMA
-        * address range to max_direct_dma_addr.
-        */
-       if (dma_ops == gx_pci_dma_map_ops ||
-           dma_ops == gx_hybrid_pci_dma_map_ops ||
-           dma_ops == gx_legacy_pci_dma_map_ops) {
-               if (mask == DMA_BIT_MASK(64) &&
-                   dma_ops == gx_legacy_pci_dma_map_ops)
-                       set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
-               else if (mask > dev->archdata.max_direct_dma_addr)
-                       mask = dev->archdata.max_direct_dma_addr;
-       }
-
-       if (!dev->dma_mask || !dma_supported(dev, mask))
-               return -EIO;
-
-       *dev->dma_mask = mask;
-
-       return 0;
-}
+int dma_set_mask(struct device *dev, u64 mask);
 
 /*
  * dma_alloc_noncoherent() is #defined to return coherent memory,
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 09b58703ac264a7218e2f4586abd7e1f4d59f82a..b6bc0547a4f6989b9e275b27c4287d7146dd3670 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -583,6 +583,35 @@ struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
 EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
 EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
 
+int dma_set_mask(struct device *dev, u64 mask)
+{
+       struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+       /*
+        * For PCI devices with 64-bit DMA addressing capability, promote
+        * the dma_ops to hybrid, with the consistent memory DMA space limited
+        * to 32-bit. For 32-bit capable devices, limit the streaming DMA
+        * address range to max_direct_dma_addr.
+        */
+       if (dma_ops == gx_pci_dma_map_ops ||
+           dma_ops == gx_hybrid_pci_dma_map_ops ||
+           dma_ops == gx_legacy_pci_dma_map_ops) {
+               if (mask == DMA_BIT_MASK(64) &&
+                   dma_ops == gx_legacy_pci_dma_map_ops)
+                       set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
+               else if (mask > dev->archdata.max_direct_dma_addr)
+                       mask = dev->archdata.max_direct_dma_addr;
+       }
+
+       if (!dev->dma_mask || !dma_supported(dev, mask))
+               return -EIO;
+
+       *dev->dma_mask = mask;
+
+       return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
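
Callers are unaffected by the move: dma_set_mask() keeps the same prototype, and the EXPORT_SYMBOL added above makes it available to modular drivers just as the inline was. For reference, a sketch of the usual probe-time call (mydrv_probe is hypothetical; the 64-bit-with-32-bit-fallback idiom is standard driver practice, not something this commit changes):

	#include <linux/dma-mapping.h>

	static int mydrv_probe(struct device *dev)
	{
		/* Prefer full 64-bit DMA; fall back to 32 bits if the
		 * bus or platform cannot address that much. */
		if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
		    dma_set_mask(dev, DMA_BIT_MASK(32)))
			return -EIO;

		return 0;
	}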