[SG] Update drivers to use sg helpers
drivers/ieee1394/dma.c
/*
 * DMA region bookkeeping routines
 *
 * Copyright (C) 2002 Maas Digital LLC
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/scatterlist.h>	/* sg_set_page() and the other sg helpers */

#include "dma.h"

/* dma_prog_region */

void dma_prog_region_init(struct dma_prog_region *prog)
{
	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}

int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
			  struct pci_dev *dev)
{
	/* round up to page size */
	n_bytes = PAGE_ALIGN(n_bytes);

	prog->n_pages = n_bytes >> PAGE_SHIFT;

	prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
	if (!prog->kvirt) {
		printk(KERN_ERR
		       "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
		dma_prog_region_free(prog);
		return -ENOMEM;
	}

	prog->dev = dev;

	return 0;
}
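
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * such as an OHCI DMA context would typically pair these calls as shown
 * below.  The names "ohci" and "PROG_BYTES" are hypothetical placeholders.
 *
 *	struct dma_prog_region prog;
 *
 *	dma_prog_region_init(&prog);
 *	if (dma_prog_region_alloc(&prog, PROG_BYTES, ohci->dev))
 *		return -ENOMEM;
 *	... write descriptors through prog.kvirt, hand prog.bus_addr to the
 *	    host controller ...
 *	dma_prog_region_free(&prog);
 */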

void dma_prog_region_free(struct dma_prog_region *prog)
{
	if (prog->kvirt) {
		pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
				    prog->kvirt, prog->bus_addr);
	}

	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}

/* dma_region */

/**
 * dma_region_init - clear out all fields but do not allocate anything
 */
void dma_region_init(struct dma_region *dma)
{
	dma->kvirt = NULL;
	dma->dev = NULL;
	dma->n_pages = 0;
	dma->n_dma_pages = 0;
	dma->sglist = NULL;
}

/**
 * dma_region_alloc - allocate the buffer and map it to the IOMMU
 */
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
		     struct pci_dev *dev, int direction)
{
	unsigned int i;

	/* round up to page size */
	n_bytes = PAGE_ALIGN(n_bytes);

	dma->n_pages = n_bytes >> PAGE_SHIFT;

	dma->kvirt = vmalloc_32(n_bytes);
	if (!dma->kvirt) {
		printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
		goto err;
	}

	/* Clear the ram out, no junk to the user */
	memset(dma->kvirt, 0, n_bytes);

	/* allocate scatter/gather list */
	dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
	if (!dma->sglist) {
		printk(KERN_ERR "dma_region_alloc: vmalloc(sglist) failed\n");
		goto err;
	}

	/* just to be safe - this will become unnecessary once sglist->address goes away */
	memset(dma->sglist, 0, dma->n_pages * sizeof(*dma->sglist));

	/* fill scatter/gather list with pages */
	for (i = 0; i < dma->n_pages; i++) {
		unsigned long va =
		    (unsigned long)dma->kvirt + (i << PAGE_SHIFT);

		sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va));
		dma->sglist[i].length = PAGE_SIZE;
	}

	/* map sglist to the IOMMU */
	dma->n_dma_pages =
	    pci_map_sg(dev, dma->sglist, dma->n_pages, direction);

	if (dma->n_dma_pages == 0) {
		printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
		goto err;
	}

	dma->dev = dev;
	dma->direction = direction;

	return 0;

err:
	dma_region_free(dma);
	return -ENOMEM;
}
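
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * allocating a streaming buffer that the device will write into might use
 * the region like this.  "buf", "pdev" and "N_BYTES" are hypothetical
 * placeholders.
 *
 *	struct dma_region buf;
 *
 *	dma_region_init(&buf);
 *	if (dma_region_alloc(&buf, N_BYTES, pdev, PCI_DMA_FROMDEVICE))
 *		return -ENOMEM;
 *	... use buf.kvirt from the CPU, buf.sglist entries for the device ...
 *	dma_region_free(&buf);
 */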

/**
 * dma_region_free - unmap and free the buffer
 */
void dma_region_free(struct dma_region *dma)
{
	if (dma->n_dma_pages) {
		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
			     dma->direction);
		dma->n_dma_pages = 0;
		dma->dev = NULL;
	}

	vfree(dma->sglist);
	dma->sglist = NULL;

	vfree(dma->kvirt);
	dma->kvirt = NULL;
	dma->n_pages = 0;
}

/* find the scatterlist index and remaining offset corresponding to a
   given offset from the beginning of the buffer */
static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
				  unsigned int start, unsigned long *rem)
{
	int i;
	unsigned long off = offset;

	for (i = start; i < dma->n_dma_pages; i++) {
		if (off < sg_dma_len(&dma->sglist[i])) {
			*rem = off;
			break;
		}

		off -= sg_dma_len(&dma->sglist[i]);
	}

	BUG_ON(i >= dma->n_dma_pages);

	return i;
}
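
/*
 * Worked example (illustrative only, not part of the original file): with
 * 4 KiB pages and every mapped sglist entry covering exactly one page, an
 * offset of 5000 falls past the first entry (5000 - 4096 = 904), so
 * dma_region_find() returns index 1 with *rem = 904.  After IOMMU merging
 * an entry may span several pages, in which case fewer iterations are needed.
 */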

/**
 * dma_region_offset_to_bus - get bus address of an offset within a DMA region
 *
 * Returns the DMA bus address of the byte with the given @offset relative to
 * the beginning of the @dma.
 */
dma_addr_t dma_region_offset_to_bus(struct dma_region *dma,
				    unsigned long offset)
{
	unsigned long rem = 0;

	struct scatterlist *sg =
	    &dma->sglist[dma_region_find(dma, offset, 0, &rem)];
	return sg_dma_address(sg) + rem;
}
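
/*
 * Usage sketch (illustrative only, not part of the original file): a DMA
 * descriptor that should target byte "off" of the buffer is programmed with
 * the bus address returned here, e.g.
 *
 *	desc->data_address = dma_region_offset_to_bus(&buf, off);
 *
 * "desc", "buf" and "off" are hypothetical placeholders.
 */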

/**
 * dma_region_sync_for_cpu - sync the CPU's view of the buffer
 */
void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
			     unsigned long len)
{
	int first, last;
	unsigned long rem = 0;

	if (!len)
		len = 1;

	first = dma_region_find(dma, offset, 0, &rem);
	last = dma_region_find(dma, rem + len - 1, first, &rem);

	pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1,
				dma->direction);
}

/**
 * dma_region_sync_for_device - sync the IO bus' view of the buffer
 */
void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
				unsigned long len)
{
	int first, last;
	unsigned long rem = 0;

	if (!len)
		len = 1;

	first = dma_region_find(dma, offset, 0, &rem);
	last = dma_region_find(dma, rem + len - 1, first, &rem);

	pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
				   last - first + 1, dma->direction);
}
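
/*
 * Usage sketch (illustrative only, not part of the original file): sync a
 * range toward the CPU before reading data the device has just written, and
 * sync it back toward the device before handing the range to the hardware
 * again.  "buf", "off" and "len" are hypothetical placeholders.
 *
 *	dma_region_sync_for_cpu(&buf, off, len);
 *	... CPU reads buf.kvirt + off ...
 *	dma_region_sync_for_device(&buf, off, len);
 */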

#ifdef CONFIG_MMU

/* nopage() handler for mmap access */

static struct page *dma_region_pagefault(struct vm_area_struct *area,
					 unsigned long address, int *type)
{
	unsigned long offset;
	unsigned long kernel_virt_addr;
	struct page *ret = NOPAGE_SIGBUS;

	struct dma_region *dma = (struct dma_region *)area->vm_private_data;

	if (!dma->kvirt)
		goto out;

	if ((address < (unsigned long)area->vm_start) ||
	    (address >
	     (unsigned long)area->vm_start + (dma->n_pages << PAGE_SHIFT)))
		goto out;

	if (type)
		*type = VM_FAULT_MINOR;
	offset = address - area->vm_start;
	kernel_virt_addr = (unsigned long)dma->kvirt + offset;
	ret = vmalloc_to_page((void *)kernel_virt_addr);
	get_page(ret);
out:
	return ret;
}

static struct vm_operations_struct dma_region_vm_ops = {
	.nopage = dma_region_pagefault,
};

/**
 * dma_region_mmap - map the buffer into a user space process
 */
int dma_region_mmap(struct dma_region *dma, struct file *file,
		    struct vm_area_struct *vma)
{
	unsigned long size;

	if (!dma->kvirt)
		return -EINVAL;

	/* must be page-aligned */
	if (vma->vm_pgoff != 0)
		return -EINVAL;

	/* check the length */
	size = vma->vm_end - vma->vm_start;
	if (size > (dma->n_pages << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_ops = &dma_region_vm_ops;
	vma->vm_private_data = dma;
	vma->vm_file = file;
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
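
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * character device's mmap handler can simply forward to dma_region_mmap().
 * "my_dev" and "my_mmap" are hypothetical placeholders.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_region_mmap(&my_dev->buf, file, vma);
 *	}
 */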

#else /* CONFIG_MMU */

int dma_region_mmap(struct dma_region *dma, struct file *file,
		    struct vm_area_struct *vma)
{
	return -EINVAL;
}

#endif /* CONFIG_MMU */