dma-mapping: use unsigned long for dma_attrs
arch/powerpc/kernel/dma.c
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/machdep.h>
#include <asm/swiotlb.h>
#include <asm/iommu.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
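
/*
 * Illustrative sketch (not part of the original file): platform setup code
 * could install a non-zero per-device offset with the powerpc helpers
 * set_dma_offset() and set_dma_ops() before the device is used. The
 * 0x80000000 window base and the function name are made up for the example.
 */
#if 0	/* example only */
static void example_board_setup(struct device *dev)
{
        /* bus address = CPU physical address + 0x80000000 */
        set_dma_offset(dev, 0x80000000ull);
        set_dma_ops(dev, &dma_direct_ops);
}
#endif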

static u64 __maybe_unused get_pfn_limit(struct device *dev)
{
        u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
        struct dev_archdata __maybe_unused *sd = &dev->archdata;

#ifdef CONFIG_SWIOTLB
        if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops)
                pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif

        return pfn;
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
        u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);

        /* Limit fits in the mask, we are good */
        if (mask >= limit)
                return 1;

#ifdef CONFIG_FSL_SOC
        /*
         * Freescale gets another chance via ZONE_DMA/ZONE_DMA32; this
         * will have to be refined if/when they support iommus.
         */
        return 1;
#endif
        /* Sorry ... */
        return 0;
#else
        return 1;
#endif
}
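
/*
 * Worked example: with 4 GiB of RAM ending at 0x100000000 and a zero DMA
 * offset, limit above is 0xffffffff, so DMA_BIT_MASK(32) (== 0xffffffff)
 * passes the check, while a 31-bit mask is refused on 64-bit non-FSL
 * configurations.
 */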
64
65 void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
66 dma_addr_t *dma_handle, gfp_t flag,
67 unsigned long attrs)
68 {
69 void *ret;
70 #ifdef CONFIG_NOT_COHERENT_CACHE
71 ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
72 if (ret == NULL)
73 return NULL;
74 *dma_handle += get_dma_offset(dev);
75 return ret;
76 #else
77 struct page *page;
78 int node = dev_to_node(dev);
79 #ifdef CONFIG_FSL_SOC
80 u64 pfn = get_pfn_limit(dev);
81 int zone;
82
83 /*
84 * This code should be OK on other platforms, but we have drivers that
85 * don't set coherent_dma_mask. As a workaround we just ifdef it. This
86 * whole routine needs some serious cleanup.
87 */
88
89 zone = dma_pfn_limit_to_zone(pfn);
90 if (zone < 0) {
91 dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
92 __func__, pfn);
93 return NULL;
94 }
95
96 switch (zone) {
97 case ZONE_DMA:
98 flag |= GFP_DMA;
99 break;
100 #ifdef CONFIG_ZONE_DMA32
101 case ZONE_DMA32:
102 flag |= GFP_DMA32;
103 break;
104 #endif
105 };
106 #endif /* CONFIG_FSL_SOC */
107
108 /* ignore region specifiers */
109 flag &= ~(__GFP_HIGHMEM);
110
111 page = alloc_pages_node(node, flag, get_order(size));
112 if (page == NULL)
113 return NULL;
114 ret = page_address(page);
115 memset(ret, 0, size);
116 *dma_handle = __pa(ret) + get_dma_offset(dev);
117
118 return ret;
119 #endif
120 }

void __dma_direct_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                unsigned long attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}
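
/*
 * Illustrative driver-side sketch (not part of the original file): the two
 * helpers above are normally reached through the generic DMA API rather
 * than called directly. "my_dev" and the PAGE_SIZE allocation are
 * assumptions for the example.
 */
#if 0	/* example only */
static int example_use_coherent(struct device *my_dev)
{
        dma_addr_t bus_addr;
        void *buf;

        buf = dma_alloc_coherent(my_dev, PAGE_SIZE, &bus_addr, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        /* ... program the device with bus_addr, touch buf from the CPU ... */
        dma_free_coherent(my_dev, PAGE_SIZE, buf, bus_addr);
        return 0;
}
#endif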

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag,
                                       unsigned long attrs)
{
        struct iommu_table *iommu;

        /*
         * The coherent mask may be smaller than the real mask; check if
         * we can really use the direct ops.
         */
        if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
                return __dma_direct_alloc_coherent(dev, size, dma_handle,
                                                   flag, attrs);

        /* OK, we can't. Do we have an iommu? If not, fail. */
        iommu = get_iommu_table_base(dev);
        if (!iommu)
                return NULL;

        /* Try to use the iommu */
        return iommu_alloc_coherent(dev, iommu, size, dma_handle,
                                    dev->coherent_dma_mask, flag,
                                    dev_to_node(dev));
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle,
                                     unsigned long attrs)
{
        struct iommu_table *iommu;

        /* See comments in dma_direct_alloc_coherent() */
        if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
                return __dma_direct_free_coherent(dev, size, vaddr, dma_handle,
                                                  attrs);
        /* Maybe we used an iommu ... */
        iommu = get_iommu_table_base(dev);

        /*
         * If we hit this, we should never have allocated in the first
         * place, so how come we are freeing?
         */
        if (WARN_ON(!iommu))
                return;
        iommu_free_coherent(iommu, size, vaddr, dma_handle);
}

int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t handle, size_t size,
                             unsigned long attrs)
{
        unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
        pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
        return remap_pfn_range(vma, vma->vm_start,
                               pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
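
/*
 * Illustrative sketch (not part of the original file): a character-device
 * driver would usually reach this through dma_mmap_coherent() from its own
 * mmap handler. "my_dev", "my_buf" and "my_handle" are assumed to be the
 * driver's saved device pointer, CPU address and DMA handle.
 */
#if 0	/* example only */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        return dma_mmap_coherent(my_dev, vma, my_buf, my_handle,
                                 vma->vm_end - vma->vm_start);
}
#endif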

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
                sg->dma_length = sg->length;
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
        }

        return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                unsigned long attrs)
{
}
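
/*
 * Illustrative sketch (not part of the original file): a driver builds a
 * scatterlist and maps it through the generic API, which lands in
 * dma_direct_map_sg() above when the direct ops are in use. The buffer
 * names and lengths are assumptions for the example.
 */
#if 0	/* example only */
static int example_map_sg(struct device *my_dev, void *buf_a, size_t len_a,
                          void *buf_b, size_t len_b)
{
        struct scatterlist sg[2];
        int mapped;

        sg_init_table(sg, 2);
        sg_set_buf(&sg[0], buf_a, len_a);
        sg_set_buf(&sg[1], buf_b, len_b);

        mapped = dma_map_sg(my_dev, sg, 2, DMA_TO_DEVICE);
        if (!mapped)
                return -EIO;
        /* ... start the transfer using sg_dma_address()/sg_dma_len() ... */
        dma_unmap_sg(my_dev, sg, 2, DMA_TO_DEVICE);
        return 0;
}
#endif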

static u64 dma_direct_get_required_mask(struct device *dev)
{
        u64 end, mask;

        end = memblock_end_of_DRAM() + get_dma_offset(dev);

        mask = 1ULL << (fls64(end) - 1);
        mask += mask - 1;

        return mask;
}
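
/*
 * Worked example: with RAM ending at 0x100000000 (4 GiB) and a zero offset,
 * fls64(end) is 33, so mask is first 1ULL << 32 and then 0x1ffffffff after
 * the increment, i.e. a 33-bit mask wide enough to cover all of DRAM.
 */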

static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction dir,
                                             unsigned long attrs)
{
        BUG_ON(dir == DMA_NONE);
        __dma_sync_page(page, offset, size, dir);
        return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         unsigned long attrs)
{
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
                                      struct scatterlist *sgl, int nents,
                                      enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
                                          dma_addr_t dma_handle, size_t size,
                                          enum dma_data_direction direction)
{
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif
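
/*
 * Illustrative sketch (not part of the original file): on a non-coherent
 * platform a driver must bracket CPU accesses to a streaming buffer with
 * the sync calls that end up in the helpers above. Names are assumed.
 */
#if 0	/* example only */
static void example_read_streaming(struct device *my_dev, dma_addr_t handle,
                                   size_t len)
{
        dma_sync_single_for_cpu(my_dev, handle, len, DMA_FROM_DEVICE);
        /* ... the CPU may now safely read the buffer ... */
        dma_sync_single_for_device(my_dev, handle, len, DMA_FROM_DEVICE);
}
#endif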

struct dma_map_ops dma_direct_ops = {
        .alloc                          = dma_direct_alloc_coherent,
        .free                           = dma_direct_free_coherent,
        .mmap                           = dma_direct_mmap_coherent,
        .map_sg                         = dma_direct_map_sg,
        .unmap_sg                       = dma_direct_unmap_sg,
        .dma_supported                  = dma_direct_dma_supported,
        .map_page                       = dma_direct_map_page,
        .unmap_page                     = dma_direct_unmap_page,
        .get_required_mask              = dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
        .sync_single_for_cpu            = dma_direct_sync_single,
        .sync_single_for_device         = dma_direct_sync_single,
        .sync_sg_for_cpu                = dma_direct_sync_sg,
        .sync_sg_for_device             = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        if (!dma_supported(dev, mask)) {
                /*
                 * We need to special case the direct DMA ops which can
                 * support a fallback for coherent allocations. There
                 * is no dma_op->set_coherent_mask() so we have to do
                 * things the hard way:
                 */
                if (get_dma_ops(dev) != &dma_direct_ops ||
                    get_iommu_table_base(dev) == NULL ||
                    !dma_iommu_dma_supported(dev, mask))
                        return -EIO;
        }
        dev->coherent_dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int __dma_set_mask(struct device *dev, u64 dma_mask)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
                return dma_ops->set_dma_mask(dev, dma_mask);
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
        *dev->dma_mask = dma_mask;
        return 0;
}

int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (ppc_md.dma_set_mask)
                return ppc_md.dma_set_mask(dev, dma_mask);

        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
                struct pci_controller *phb = pci_bus_to_host(pdev->bus);

                if (phb->controller_ops.dma_set_mask)
                        return phb->controller_ops.dma_set_mask(pdev, dma_mask);
        }

        return __dma_set_mask(dev, dma_mask);
}
EXPORT_SYMBOL(dma_set_mask);
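
/*
 * Illustrative sketch (not part of the original file): a PCI driver's probe
 * routine typically negotiates both masks at once; the generic helper
 * dma_set_mask_and_coherent() funnels into the routines above on powerpc.
 */
#if 0	/* example only */
static int example_probe(struct pci_dev *pdev)
{
        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
                return -EIO;
        return 0;
}
#endif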

u64 __dma_get_required_mask(struct device *dev)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if (unlikely(dma_ops == NULL))
                return 0;

        if (dma_ops->get_required_mask)
                return dma_ops->get_required_mask(dev);

        return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}

u64 dma_get_required_mask(struct device *dev)
{
        if (ppc_md.dma_get_required_mask)
                return ppc_md.dma_get_required_mask(dev);

        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
                struct pci_controller *phb = pci_bus_to_host(pdev->bus);

                if (phb->controller_ops.dma_get_required_mask)
                        return phb->controller_ops.dma_get_required_mask(pdev);
        }

        return __dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif
#ifdef CONFIG_IBMVIO
        dma_debug_add_bus(&vio_bus_type);
#endif

        return 0;
}
fs_initcall(dma_init);