/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>
#include <asm/machdep.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
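
/*
 * Illustrative sketch (hypothetical platform setup code, assuming the
 * set_dma_offset()/set_dma_ops() helpers from asm/dma-mapping.h and a
 * made-up 1GB bus offset): a platform whose devices see memory at a
 * non-zero bus address could wire a device up like this at probe time:
 *
 *	set_dma_offset(&pdev->dev, 0x40000000ul);
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 */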

void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag)
{
        void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
        ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
        if (ret == NULL)
                return NULL;
        *dma_handle += get_dma_offset(dev);
        return ret;
#else
        struct page *page;
        int node = dev_to_node(dev);

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_abs(ret) + get_dma_offset(dev);

        return ret;
#endif
}
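
/*
 * Usage sketch (hypothetical driver code; RING_BYTES, RING_BASE_REG and
 * base are made-up names): a driver on a bus using dma_direct_ops reaches
 * the allocator above through the generic dma_alloc_coherent() wrapper and
 * hands the returned bus address to its hardware.
 *
 *	dma_addr_t ring_bus;
 *	void *ring = dma_alloc_coherent(&pdev->dev, RING_BYTES,
 *					&ring_bus, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(lower_32_bits(ring_bus), base + RING_BASE_REG);
 */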

void dma_direct_free_coherent(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
                sg->dma_length = sg->length;
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
        }

        return nents;
}
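
/*
 * Example call path (hypothetical driver code): the loop above runs when
 * a driver maps a scatterlist through the generic wrapper; on a
 * cache-incoherent platform each entry is flushed or invalidated by
 * __dma_sync_page() before the device is allowed to touch it.
 *
 *	int mapped = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (mapped == 0)
 *		return -EIO;
 */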

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
        /* Nothing to undo: a direct mapping holds no state of its own. */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
        /* This could be improved so that platforms can set the limit in
         * case they have limited DMA windows.
         */
        return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
        return 1;
#endif
}
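
/*
 * Worked example for the PPC64 test above (illustrative numbers): with a
 * zero DMA offset and 8GB of RAM, memblock_end_of_DRAM() - 1 is
 * 0x1ffffffff, so a 32-bit mask (0xffffffff) is rejected while a 64-bit
 * mask is accepted; a device limited to 32-bit addressing would need an
 * IOMMU or bounce buffering instead.
 */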

static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction dir,
                                             struct dma_attrs *attrs)
{
        BUG_ON(dir == DMA_NONE);
        __dma_sync_page(page, offset, size, dir);
        return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        /* As with unmap_sg above, there is nothing to tear down. */
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif
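
/*
 * Usage sketch (hypothetical driver code; buf_bus and len are made-up
 * names): on a cache-incoherent platform, a driver that lets the CPU
 * inspect a live streaming mapping brackets the access with the sync
 * helpers above, reached through the generic wrappers:
 *
 *	dma_sync_single_for_cpu(dev, buf_bus, len, DMA_FROM_DEVICE);
 *	... CPU reads the buffer ...
 *	dma_sync_single_for_device(dev, buf_bus, len, DMA_FROM_DEVICE);
 */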

struct dma_map_ops dma_direct_ops = {
        .alloc_coherent = dma_direct_alloc_coherent,
        .free_coherent = dma_direct_free_coherent,
        .map_sg = dma_direct_map_sg,
        .unmap_sg = dma_direct_unmap_sg,
        .dma_supported = dma_direct_dma_supported,
        .map_page = dma_direct_map_page,
        .unmap_page = dma_direct_unmap_page,
#ifdef CONFIG_NOT_COHERENT_CACHE
        .sync_single_for_cpu = dma_direct_sync_single,
        .sync_single_for_device = dma_direct_sync_single,
        .sync_sg_for_cpu = dma_direct_sync_sg,
        .sync_sg_for_device = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int dma_set_mask(struct device *dev, u64 dma_mask)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if (ppc_md.dma_set_mask)
                return ppc_md.dma_set_mask(dev, dma_mask);
        if (unlikely(dma_ops == NULL))
                return -EIO;
        if (dma_ops->set_dma_mask != NULL)
                return dma_ops->set_dma_mask(dev, dma_mask);
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
        *dev->dma_mask = dma_mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
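
/*
 * Usage sketch (hypothetical driver code): a driver advertising 64-bit
 * addressing with a 32-bit fallback goes through the function above:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */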

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);