Linux 3.18
arch/microblaze/kernel/dma.c
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>

/* MicroBlaze caches are not DMA-coherent, so take the uncached paths. */
#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
	/* Allocate from the uncached consistent-memory pool. */
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret);

	return ret;
#endif
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
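
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * reaches the two callbacks above through the generic DMA API. The
 * device "pdev" and the fixed SZ_4K size are hypothetical.
 *
 *	dma_addr_t handle;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(&pdev->dev, SZ_4K, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... hand "handle" to the device, CPU accesses "buf" ...
 *	dma_free_coherent(&pdev->dev, SZ_4K, buf, handle);
 */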

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		/* Bus address is the physical address; sync each segment. */
		sg->dma_address = sg_phys(sg);
		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
			   sg->length, direction);
	}

	return nents;
}
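
/*
 * Usage sketch (illustrative): mapping a two-entry scatterlist through
 * the callback above via the generic API. "pdev" and "my_pages" are
 * hypothetical.
 *
 *	struct scatterlist sgl[2];
 *	int mapped;
 *
 *	sg_init_table(sgl, 2);
 *	sg_set_page(&sgl[0], my_pages[0], PAGE_SIZE, 0);
 *	sg_set_page(&sgl[1], my_pages[1], PAGE_SIZE, 0);
 *	mapped = dma_map_sg(&pdev->dev, sgl, 2, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -EIO;
 *	... program the device from sg_dma_address()/sg_dma_len() ...
 *	dma_unmap_sg(&pdev->dev, sgl, 2, DMA_TO_DEVICE);
 */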

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	/* Claim support for any DMA mask on this directly mapped bus. */
	return 1;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	/*
	 * Sync the buffer for the device; on this 1:1 mapped bus the
	 * physical address doubles as the bus address.
	 */
	__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset;
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/*
	 * No further cache cleanup is necessary here: dma_address is
	 * already a physical address, so it can be handed straight to
	 * __dma_sync() without a virtual-to-physical translation.
	 */
	__dma_sync(dma_address, size, direction);
}
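
/*
 * Usage sketch (illustrative): one streaming mapping through the
 * map_page/unmap_page callbacks above. "pdev", "buf" and "len" are
 * hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -EIO;
 *	... start the transfer ...
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 */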

static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * Flushing the cache is pointless when the memory segment is
	 * handed to the CPU; only DMA_FROM_DEVICE needs an invalidate
	 * so the CPU sees what the device has written.
	 */
	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * Invalidating the cache is pointless if the device is not
	 * supposed to write to the relevant region; only DMA_TO_DEVICE
	 * needs a write-back so the device sees the CPU's data.
	 */
	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}
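
/*
 * Usage sketch (illustrative): re-using one streaming mapping across
 * repeated transfers with the sync callbacks above. "pdev", "buf",
 * "len" and the loop condition are hypothetical.
 *
 *	handle = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
 *	while (more_work) {
 *		... wait for the device to fill the buffer ...
 *		dma_sync_single_for_cpu(&pdev->dev, handle, len,
 *					DMA_FROM_DEVICE);
 *		... CPU inspects buf ...
 *		dma_sync_single_for_device(&pdev->dev, handle, len,
 *					   DMA_FROM_DEVICE);
 *	}
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_FROM_DEVICE);
 */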

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

struct dma_map_ops dma_direct_ops = {
	.alloc = dma_direct_alloc_coherent,
	.free = dma_direct_free_coherent,
	.map_sg = dma_direct_map_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
	.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
	.sync_single_for_device = dma_direct_sync_single_for_device,
	.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device = dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
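
/*
 * Background sketch (an assumption about the surrounding MicroBlaze
 * headers, not shown in this file): the generic DMA API reaches these
 * ops through the per-arch get_dma_ops() hook, roughly:
 *
 *	static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 *	{
 *		return &dma_direct_ops;
 *	}
 */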

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);
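
/*
 * Usage note (an assumption about the generic dma-debug facility, not
 * specific to this file): dma_debug_init() only arms these checks when
 * the kernel is built with CONFIG_DMA_API_DEBUG=y; it then warns about
 * leaked mappings and about unmaps whose size or direction do not
 * match the original dma_map_*() call.
 */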