metag: convert to dma_map_ops
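This converts metag from its private DMA API implementation to the common dma_map_ops mechanism: the exported arch entry points (dma_alloc_coherent, dma_free_coherent, dma_mmap_coherent, dma_mmap_writecombine, dma_sync_for_device, dma_sync_for_cpu) become static helpers wired into a metag_dma_ops table, which the generic dma-mapping code dispatches through. The companion change to arch/metag/include/asm/dma-mapping.h is not part of this diff; in these conversions that header typically shrinks to little more than exposing the ops table, along these lines (a sketch, assuming the usual pattern):

/* arch/metag/include/asm/dma-mapping.h (assumed shape, not shown in this diff) */
extern struct dma_map_ops metag_dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* a single ops table for the whole architecture */
	return &metag_dma_ops;
}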
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c
index c700d625067a96cb725064e0c19ceadad078cd6a..e12368d02155ac5ede14d9ce982134de486a4551 100644
@@ -171,8 +171,8 @@ out:
  * Allocate DMA-coherent memory space and return both the kernel remapped
  * virtual and bus address for that space.
  */
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *handle, gfp_t gfp)
+static void *metag_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
        struct page *page;
        struct metag_vm_region *c;
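Dropping EXPORT_SYMBOL(dma_alloc_coherent) and EXPORT_SYMBOL(dma_free_coherent) below is safe because, after the conversion, these are no longer arch-provided symbols at all: the common dma-mapping header expands them into dispatches through the ops table. Roughly, in the struct dma_attrs era (a sketch of the generic wrapper, not part of this patch; the real one also handles per-device coherent pools):

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* ends up in metag_dma_alloc() on this architecture */
	return ops->alloc(dev, size, dma_handle, flag, attrs);
}

#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)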
@@ -263,13 +263,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 no_page:
        return NULL;
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
 /*
  * free a page as defined by the above mapping.
  */
-void dma_free_coherent(struct device *dev, size_t size,
-                      void *vaddr, dma_addr_t dma_handle)
+static void metag_dma_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
        struct metag_vm_region *c;
        unsigned long flags, addr;
@@ -329,16 +328,19 @@ no_area:
               __func__, vaddr);
        dump_stack();
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
-
-static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
-                   void *cpu_addr, dma_addr_t dma_addr, size_t size)
+static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+               struct dma_attrs *attrs)
 {
-       int ret = -ENXIO;
-
        unsigned long flags, user_size, kern_size;
        struct metag_vm_region *c;
+       int ret = -ENXIO;
+
+       if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+       else
+               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
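The DMA_ATTR_WRITE_COMBINE test added above absorbs the separate dma_mmap_writecombine() entry point that the next hunk deletes: callers now request a write-combined mapping through the attrs argument instead of a dedicated function. Caller-side, that looks like this (usage sketch, not part of this patch):

DEFINE_DMA_ATTRS(attrs);

/* ask for a write-combined rather than uncached userspace mapping */
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
ret = dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);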
@@ -364,25 +366,6 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
        return ret;
 }
 
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-                     void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_coherent);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-                         void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-       return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_writecombine);
-
-
-
-
 /*
  * Initialise the consistent memory allocation.
  */
@@ -423,7 +406,7 @@ early_initcall(dma_alloc_init);
 /*
  * make an area consistent to devices.
  */
-void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
+static void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
 {
        /*
         * Ensure any writes get through the write combiner. This is necessary
@@ -465,12 +448,11 @@ void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
 
        wmb();
 }
-EXPORT_SYMBOL(dma_sync_for_device);
 
 /*
  * make an area consistent to the core.
  */
-void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
+static void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
 {
        /*
         * Hardware L2 cache prefetch doesn't occur across 4K physical
@@ -497,4 +479,102 @@ void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
 
        rmb();
 }
-EXPORT_SYMBOL(dma_sync_for_cpu);
+
+static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size,
+               enum dma_data_direction direction, struct dma_attrs *attrs)
+{
+       dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
+                           direction);
+       return page_to_phys(page) + offset;
+}
+
+static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+               size_t size, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
+{
+       dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
+}
+
+static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+               int nents, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sglist, sg, nents, i) {
+               BUG_ON(!sg_page(sg));
+
+               sg->dma_address = sg_phys(sg);
+               dma_sync_for_device(sg_virt(sg), sg->length, direction);
+       }
+
+       return nents;
+}
+
+static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+               int nhwentries, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sglist, sg, nhwentries, i) {
+               BUG_ON(!sg_page(sg));
+
+               sg->dma_address = sg_phys(sg);
+               dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+       }
+}
+
+static void metag_dma_sync_single_for_cpu(struct device *dev,
+               dma_addr_t dma_handle, size_t size,
+               enum dma_data_direction direction)
+{
+       dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
+}
+
+static void metag_dma_sync_single_for_device(struct device *dev,
+               dma_addr_t dma_handle, size_t size,
+               enum dma_data_direction direction)
+{
+       dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
+}
+
+static void metag_dma_sync_sg_for_cpu(struct device *dev,
+               struct scatterlist *sglist, int nelems,
+               enum dma_data_direction direction)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sglist, sg, nelems, i)
+               dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+}
+
+static void metag_dma_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sglist, int nelems,
+               enum dma_data_direction direction)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sglist, sg, nelems, i)
+               dma_sync_for_device(sg_virt(sg), sg->length, direction);
+}
+
+struct dma_map_ops metag_dma_ops = {
+       .alloc                  = metag_dma_alloc,
+       .free                   = metag_dma_free,
+       .map_page               = metag_dma_map_page,
+       .unmap_page             = metag_dma_unmap_page,
+       .map_sg                 = metag_dma_map_sg,
+       .unmap_sg               = metag_dma_unmap_sg,
+       .sync_single_for_device = metag_dma_sync_single_for_device,
+       .sync_single_for_cpu    = metag_dma_sync_single_for_cpu,
+       .sync_sg_for_device     = metag_dma_sync_sg_for_device,
+       .sync_sg_for_cpu        = metag_dma_sync_sg_for_cpu,
+       .mmap                   = metag_dma_mmap,
+};
+EXPORT_SYMBOL(metag_dma_ops);
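Drivers never reference metag_dma_ops directly; the generic DMA API fetches the table via get_dma_ops() and dispatches, so driver code keeps its usual shape. For example, a streaming mapping that lands in metag_dma_map_page()/metag_dma_unmap_page() above (usage sketch, not part of this patch):

dma_addr_t bus;

bus = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, bus))
	return -ENOMEM;
/* ... point the device at "bus" and run the transfer ... */
dma_unmap_page(dev, bus, PAGE_SIZE, DMA_TO_DEVICE);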