powerpc: Handle SWIOTLB mapping error properly
arch/powerpc/kernel/dma.c
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/lmb.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
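/*
 * Illustrative sketch, not part of this file: a platform whose devices
 * see RAM at a non-zero bus address would record the offset at device
 * setup time ("dev" and the 0x80000000ul value are hypothetical):
 *
 *	dev->archdata.dma_data = (void *)0x80000000ul;
 *
 * get_dma_direct_offset() below returns that per-device value, falling
 * back to PCI_DRAM_OFFSET when no device pointer is supplied.
 */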

unsigned long get_dma_direct_offset(struct device *dev)
{
	if (dev)
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET;
}

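/*
 * Allocate a buffer for coherent DMA.  With a non-coherent cache the
 * allocation is delegated to __dma_alloc_coherent(); otherwise zeroed
 * pages are taken from the device's NUMA node.  Either way the returned
 * DMA address has the per-device offset applied.
 */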
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_direct_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}

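/*
 * Free a buffer from dma_direct_alloc_coherent(), using whichever
 * release path matches the allocation above.
 */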
void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

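/*
 * Map a scatterlist: each entry's bus address is its physical address
 * plus the per-device offset.  __dma_sync_page() performs the cache
 * maintenance needed on non-coherent hardware and compiles away on
 * coherent configurations.
 */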
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

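/*
 * A direct mapping holds no state, so unmapping has nothing to undo.
 */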
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

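/*
 * A DMA mask is usable if it covers all of RAM; on 64-bit that is the
 * end of DRAM as reported by the LMB allocator, while 32-bit accepts
 * any mask.
 */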
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= (lmb_end_of_DRAM() - 1);
#else
	return 1;
#endif
}

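/*
 * Map a single page: sync the cache where required, then apply the
 * per-device offset to the page's physical address.
 */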
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

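/*
 * As with dma_direct_unmap_sg(), there is nothing to tear down.
 */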
static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
}

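/*
 * Cache maintenance hooks, needed only when the cache is not coherent
 * with respect to DMA.
 */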
#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single_range(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle + offset), size, direction);
}
#endif

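/*
 * The operations table that platforms install for directly mapped
 * busses; the sync hooks exist only when coherency is maintained in
 * software.
 */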
struct dma_map_ops dma_direct_ops = {
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	.map_sg = dma_direct_map_sg,
	.unmap_sg = dma_direct_unmap_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_range_for_cpu = dma_direct_sync_single_range,
	.sync_single_range_for_device = dma_direct_sync_single_range,
	.sync_sg_for_cpu = dma_direct_sync_sg,
	.sync_sg_for_device = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);
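
/*
 * Usage sketch (illustrative, not from this file): a platform attaches
 * these ops to a device before any mapping is attempted, typically in
 * its bus probe path.  set_dma_ops() is the powerpc helper declared in
 * <asm/dma-mapping.h>; "pdev" is a hypothetical device here.
 *
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 */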