/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/pci.h>

#include <asm/cache.h>
#include <asm/pci/bridge.h>

/*
 * Translate an address into a PCI bus address as seen through the
 * Bridge ASIC behind the device's bus: the Bridge's DMA base address
 * (baddr) is simply added to the CPU physical address.
 */
#define pdev_to_baddr(pdev, addr) \
	(BRIDGE_CONTROLLER(pdev->bus)->baddr + (addr))
#define dev_to_baddr(dev, addr) \
	pdev_to_baddr(to_pci_dev(dev), (addr))

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, int gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/*
	 * Fall back to GFP_DMA when the device cannot reach all of
	 * memory.  Note that dev_to_baddr() below dereferences the
	 * device, so callers are expected to pass a real PCI device.
	 */
	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = dev_to_baddr(dev, virt_to_phys(ret));
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

/*
 * IP27 systems are fully cache-coherent, so coherent and noncoherent
 * allocations are one and the same.
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, int gfp)
	__attribute__((alias("dma_alloc_noncoherent")));

EXPORT_SYMBOL(dma_alloc_coherent);
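
/*
 * Illustrative sketch, not part of the original file: how a driver
 * might obtain a DMA buffer through the API above.  The function name
 * and buffer size are hypothetical; the block is compiled out.
 */
#if 0
static void *example_alloc_ring(struct device *dev, dma_addr_t *bus_addr)
{
	/* Returns 16 KiB of zeroed memory; *bus_addr already includes
	   the Bridge's baddr offset and can be handed to the device. */
	return dma_alloc_coherent(dev, 16 * 1024, bus_addr, GFP_KERNEL);
}
#endif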

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle) __attribute__((alias("dma_free_noncoherent")));

EXPORT_SYMBOL(dma_free_coherent);

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	/* No cache maintenance is needed on this coherent platform;
	   mapping is a pure address translation through the Bridge. */
	return dev_to_baddr(dev, __pa(ptr));
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_unmap_single);
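
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * single-buffer streaming transfer using the functions above.  The
 * device-programming step is elided; the block is compiled out.
 */
#if 0
static void example_stream_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... hand bus_addr to the device and wait for completion ... */

	dma_unmap_single(dev, bus_addr, len, DMA_TO_DEVICE);
}
#endif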

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Translate each segment's physical address to a bus address;
	   no merging is attempted, so all nents entries are used. */
	for (i = 0; i < nents; i++, sg++) {
		sg->dma_address = (dma_addr_t) dev_to_baddr(dev,
			page_to_phys(sg->page) + sg->offset);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);
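
/*
 * Illustrative sketch, not part of the original file: building and
 * mapping a small scatterlist with the 2.6-era layout used above.
 * The page array and count are hypothetical; the block is compiled out.
 */
#if 0
static int example_map_pages(struct device *dev, struct page **pages, int n)
{
	struct scatterlist sgl[4];
	int i;

	BUG_ON(n > 4);
	for (i = 0; i < n; i++) {
		sgl[i].page   = pages[i];
		sgl[i].offset = 0;
		sgl[i].length = PAGE_SIZE;
	}

	/* On success, each sgl[i].dma_address holds a bus address. */
	return dma_map_sg(dev, sgl, n, DMA_BIDIRECTIONAL);
}
#endif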

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	return dev_to_baddr(dev, page_to_phys(page) + offset);
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_unmap_sg);

/*
 * DMA is fully coherent on IP27, so all of the sync operations below
 * only need to validate their arguments.
 */
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(dma_addr_t dma_addr)
{
	return 1;
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_cache_sync);

/*
 * 64-bit (DAC, dual address cycle) addressing: the same baddr offset
 * scheme applies, expressed as a dma64_addr_t.
 */
dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
	struct page *page, unsigned long offset, int direction)
{
	dma64_addr_t addr = page_to_phys(page) + offset;

	return (dma64_addr_t) pdev_to_baddr(pdev, addr);
}

EXPORT_SYMBOL(pci_dac_page_to_dma);

struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus);

	return pfn_to_page((dma_addr - bc->baddr) >> PAGE_SHIFT);
}

EXPORT_SYMBOL(pci_dac_dma_to_page);

unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return dma_addr & ~PAGE_MASK;
}

EXPORT_SYMBOL(pci_dac_dma_to_offset);

void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);

void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
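
/*
 * Illustrative sketch, not part of the original file: a round trip
 * through the DAC helpers above.  The function is hypothetical and
 * compiled out; it assumes a page-aligned Bridge baddr, so that the
 * page and offset survive translation to and from the bus address.
 */
#if 0
static void example_dac_round_trip(struct pci_dev *pdev, struct page *page)
{
	dma64_addr_t bus_addr;

	bus_addr = pci_dac_page_to_dma(pdev, page, 0, PCI_DMA_TODEVICE);

	BUG_ON(pci_dac_dma_to_page(pdev, bus_addr) != page);
	BUG_ON(pci_dac_dma_to_offset(pdev, bus_addr) != 0);
}
#endif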