#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

extern struct bus_type pci_bus_type;

/* arch/sh/mm/consistent.c */
extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size);
extern void consistent_sync(void *vaddr, size_t size, int direction);

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	if (sh_mv.mv_consistent_alloc) {
		void *ret;

		ret = sh_mv.mv_consistent_alloc(dev, size, dma_handle, flag);
		if (ret != NULL)
			return ret;
	}

	return consistent_alloc(flag, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	if (sh_mv.mv_consistent_free) {
		int ret;

		ret = sh_mv.mv_consistent_free(dev, size, vaddr, dma_handle);
		if (ret == 0)
			return;
	}

	consistent_free(vaddr, size);
}

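/*
 * Illustrative usage (not part of the original header): a minimal sketch of
 * how a driver might obtain and release a coherent buffer through the
 * interfaces above. The device pointer "dev", the PAGE_SIZE length and the
 * variable names are assumptions made for the example only.
 *
 *	void *cpu_addr;
 *	dma_addr_t bus_addr;
 *
 *	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &bus_addr, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *
 *	... program the device with bus_addr, access the data via cpu_addr ...
 *
 *	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, bus_addr);
 */
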
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	consistent_sync(vaddr, size, (int)dir);
}

static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_phys(ptr);
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return virt_to_phys(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

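/*
 * Illustrative usage (not part of the original header): a hedged sketch of a
 * streaming mapping of a single kernel buffer. On this port dma_map_single()
 * flushes the caches for the buffer (unless the device sits behind a coherent
 * PCI bus) and returns its physical address, and dma_unmap_single() is a
 * no-op. "dev", "buf" and "len" are assumed to come from the calling driver;
 * dma_mapping_error() is defined later in this file.
 *
 *	dma_addr_t bus_addr;
 *
 *	bus_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(bus_addr))
 *		return -EIO;
 *
 *	... start the transfer using bus_addr ...
 *
 *	dma_unmap_single(dev, bus_addr, len, DMA_TO_DEVICE);
 */
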
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

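/*
 * Illustrative usage (not part of the original header): a sketch of mapping a
 * scatterlist for a device-to-memory transfer. dma_map_sg() fills in
 * sg[i].dma_address for every entry and returns the number of entries to use;
 * the scatterlist "sglist" with "nents" entries is an assumption made for the
 * example.
 *
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	for (i = 0; i < count; i++) {
 *		... feed sglist[i].dma_address and sglist[i].length
 *		    to the device's descriptor ring ...
 *	}
 *
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */
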
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}

static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

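/*
 * Illustrative usage (not part of the original header): a sketch of the sync
 * pair around CPU access to a buffer that stays mapped across transfers.
 * "bus_addr" and "len" are assumed to describe a mapping created earlier with
 * dma_map_single().
 *
 *	dma_sync_single_for_cpu(dev, bus_addr, len, DMA_FROM_DEVICE);
 *	... the CPU may now safely read what the device wrote ...
 *	dma_sync_single_for_device(dev, bus_addr, len, DMA_FROM_DEVICE);
 *	... the device may be restarted on the same buffer ...
 */
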
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

#endif /* __ASM_SH_DMA_MAPPING_H */