/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif
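
/*
 * Note: with STATS #undef'd above, DO_STATS() compiles to an empty
 * statement, so all of the counting below disappears in normal builds.
 */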

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;

	rwlock_t lock;

	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
};
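
/*
 * One dmabounce_device_info is attached to each registered device via
 * dev->archdata.dmabounce; the rwlock protects the safe_buffers list,
 * which is walked on every unmap/sync to match a DMA address back to
 * its bounce buffer.
 */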

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

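	/*
	 * Pick the cheapest source for the bounce buffer: the small
	 * pool if the request fits, then the large pool, and only
	 * fall back to dma_alloc_coherent() for oversized requests.
	 */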
	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr <= safe_dma_addr &&
		    b->safe_dma_addr + b->size > safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				    buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

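/*
 * Decide whether a mapping at dma_addr/size must be bounced.  Returns
 * a negative errno if the request can never be satisfied, 1 if the
 * range falls outside the device's DMA mask (or the platform callback
 * says so), and 0 if the device can address it directly.
 *
 * The "limit" computation extracts the window size from the mask:
 * e.g. for a 26-bit mask of 0x03ffffff, (mask + 1) & ~mask yields
 * 0x04000000 (64MB), while a full 32-bit mask gives 0, which disables
 * the size check.
 */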
static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev || !dev->archdata.dmabounce)
		return 0;

	if (dev->dma_mask) {
		unsigned long limit, mask = *dev->dma_mask;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return -E2BIG;
		}

		/* Figure out if we need to bounce from the DMA mask. */
		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
			return 1;
	}

	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
}

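/*
 * map_single() performs the actual bounce: allocate a safe buffer,
 * copy any CPU-written data into it for DMA_TO_DEVICE/BIDIRECTIONAL
 * mappings, and hand the device the safe buffer's DMA address instead
 * of the original one.
 */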
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			__func__, ptr);
		return DMA_ERROR_CODE;
	}

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return buf->safe_dma_addr;
}

static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(buf->size != size);
	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		void *ptr = buf->ptr;

		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe, ptr, size);
		memcpy(ptr, buf->safe, size);

		/*
		 * Since we may have written to a page cache page,
		 * we need to ensure that the data will be coherent
		 * with user mappings.
		 */
		__cpuc_flush_dcache_area(ptr, size);
	}
	free_safe_buffer(dev->archdata.dmabounce, buf);
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range. if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction dir,
	struct dma_attrs *attrs)
{
	dma_addr_t dma_addr;
	int ret;

	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return DMA_ERROR_CODE;

	if (ret == 0) {
		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
		return dma_addr;
	}

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
		return DMA_ERROR_CODE;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer. (basically return things back to the way they
 * should be)
 */
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
		__func__, dma_addr, size, dir);

	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
	if (!buf) {
		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
		return;
	}

	unmap_single(dev, buf, size, dir);
}

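/*
 * The __dmabounce_sync_* helpers return 0 when the handle matched a
 * bounce buffer (and the copy between the safe and unsafe copies has
 * been done), or 1 when it did not, in which case the wrappers below
 * fall through to the standard ARM cache maintenance.  The offset math
 * allows a sync on any sub-range of a bounced mapping.
 */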
static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
}

static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
}

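/*
 * With dmabounce registered, any mask the driver asks for can be
 * honoured, since out-of-range addresses are bounced; otherwise defer
 * to the standard ARM implementation.
 */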
static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
{
	if (dev->archdata.dmabounce)
		return 0;

	return arm_dma_ops.set_dma_mask(dev, dma_mask);
}

static struct dma_map_ops dmabounce_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= dmabounce_map_page,
	.unmap_page		= dmabounce_unmap_page,
	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
	.sync_single_for_device	= dmabounce_sync_for_device,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= dmabounce_set_mask,
};
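
/*
 * Only the single-page map/unmap/sync entry points are overridden;
 * the scatter-gather ops reuse the generic ARM implementations, which
 * (as far as this layer is concerned) dispatch each segment through
 * ->map_page and ->sync_single_*, so bouncing still applies
 * per-segment.
 */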

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size,
		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);
	device_info->needs_bounce = needs_bounce_fn;

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;
	set_dma_ops(dev, &dmabounce_ops);

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);
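
/*
 * Example usage (a hypothetical sketch, not taken from an in-tree
 * user): a bus or platform driver whose device can only address the
 * first 64MB might register from its probe path with an assumed
 * helper named my_needs_bounce():
 *
 *	static int my_needs_bounce(struct device *dev, dma_addr_t addr,
 *				   size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	ret = dmabounce_register_dev(dev, 512, 4096, my_needs_bounce);
 *	if (ret)
 *		dev_err(dev, "dmabounce registration failed\n");
 *
 * and undo it with dmabounce_unregister_dev(dev) before the device
 * goes away.
 */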

void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;
	set_dma_ops(dev, NULL);

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");