/*
 * arch/arm/common/dmabounce.c
 *
 * Special dma_{map/unmap/dma_sync}_* routines for systems that have
 * limited DMA windows. These functions utilize bounce buffers to
 * copy data to/from buffers located outside the DMA region. This
 * only works for systems in which DMA memory is at the bottom of
 * RAM, the remainder of memory is at the top, and the DMA memory
 * can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
 * DMA windows, will require custom implementations that reserve memory
 * areas at early bootup.
 *
 * Original version by Brad Parker (brad@heeltoe.com)
 * Re-written by Christopher Hoover <ch@murgatroid.com>
 * Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright (C) 2002 Hewlett Packard Company.
 * Copyright (C) 2004 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
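
/*
 * Overview of operation:
 *
 *	dmabounce_register_dev(dev, small, large);
 *		- platform code declares that 'dev' needs bouncing and
 *		  sets up two pools of pre-allocated safe buffers.
 *	dma_map_single(dev, ptr, size, dir);
 *		- if the buffer lies outside the device's DMA window, a
 *		  safe buffer is substituted (copying the data for
 *		  DMA_TO_DEVICE/DMA_BIDIRECTIONAL) and its DMA address
 *		  is returned instead.
 *	dma_unmap_single(dev, dma_addr, size, dir);
 *		- copies the data back for DMA_FROM_DEVICE/
 *		  DMA_BIDIRECTIONAL and releases the safe buffer.
 */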

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>

#include <asm/cacheflush.h>

#undef DEBUG
#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

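/*
 * Each registered device gets two DMA pools of pre-allocated bounce
 * buffers, sized when the device registers; requests that fit neither
 * pool fall back to dma_alloc_coherent() (see alloc_safe_buffer()).
 */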
struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

struct dmabounce_device_info {
	struct list_head node;

	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;
};

static LIST_HEAD(dmabounce_devs);

#ifdef STATS
static void print_alloc_stats(struct dmabounce_device_info *device_info)
{
	printk(KERN_INFO
		"%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
		device_info->dev->bus_id,
		device_info->small.allocs, device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs);
}
#endif

/* find the given device in the dmabounce device list */
static inline struct dmabounce_device_info *
find_dmabounce_dev(struct device *dev)
{
	struct dmabounce_device_info *d;

	list_for_each_entry(d, &dmabounce_devs, node)
		if (d->dev == dev)
			return d;

	return NULL;
}

/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
	if (device_info->total_allocs % 1000 == 0)
		print_alloc_stats(device_info);
#endif

	list_add(&buf->node, &device_info->safe_buffers);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b;

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr)
			return b;

	return NULL;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	list_del(&buf->node);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

#ifdef STATS
static void print_map_stats(struct dmabounce_device_info *device_info)
{
	dev_info(device_info->dev,
		"dmabounce: map_op_count=%lu, bounce_count=%lu\n",
		device_info->map_op_count, device_info->bounce_count);
}
#endif

static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
	   enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

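		/*
		 * limit is the largest size the device can address in
		 * one go.  E.g. for a 24-bit mask of 0x00ffffff:
		 * (0x00ffffff + 1) & ~0x00ffffff == 0x01000000 (16MB).
		 * For a full 32-bit mask the addition wraps to zero,
		 * so the size check below is skipped.
		 */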
		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == NULL) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			return ~0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	}

	consistent_sync(ptr, size, dir);

	return dma_addr;
}

static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	     enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	struct safe_buffer *buf = NULL;

	/*
	 * Trying to unmap an invalid mapping
	 */
	if (dma_mapping_error(dma_addr)) {
		dev_err(dev, "Trying to unmap invalid mapping\n");
		return;
	}

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		BUG_ON(buf->size != size);

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			unsigned long ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			ptr = (unsigned long)buf->ptr;
			dmac_clean_range(ptr, ptr + size);
		}
		free_safe_buffer(device_info, buf);
	}
}

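/*
 * Both the _for_cpu and _for_device sync entry points funnel through
 * here.  For a bounced buffer the data is re-copied between the
 * caller's buffer and the safe buffer according to the direction
 * argument; non-bounced buffers just get a cache sync.
 */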
static inline void
sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	    enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	struct safe_buffer *buf = NULL;

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		/*
		 * Both of these checks from original code need to be
		 * commented out b/c some drivers rely on the following:
		 *
		 * 1) Drivers may map a large chunk of memory into DMA space
		 *    but only sync a small portion of it. Good example is
		 *    allocating a large buffer, mapping it, and then
		 *    breaking it up into small descriptors. No point
		 *    in syncing the whole buffer if you only have to
		 *    touch one descriptor.
		 *
		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
		 *    usually only synced in one dir at a time.
		 *
		 * See drivers/net/eepro100.c for examples of both cases.
		 *
		 * -ds
		 *
		 * BUG_ON(buf->size != size);
		 * BUG_ON(buf->direction != dir);
		 */

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		switch (dir) {
		case DMA_FROM_DEVICE:
			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);
			break;
		case DMA_TO_DEVICE:
			dev_dbg(dev,
				"%s: copy out unsafe %p to safe %p, size %d\n",
				__func__, buf->ptr, buf->safe, size);
			memcpy(buf->safe, buf->ptr, size);
			break;
		case DMA_BIDIRECTIONAL:
			BUG();	/* is this allowed?  what does it mean? */
		default:
			BUG();
		}
		consistent_sync(buf->safe, size, dir);
	} else {
		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
	}
}

/* ************************************************** */

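/*
 * The exported entry points below disable local interrupts around the
 * real work: the per-device safe_buffers list is not protected by a
 * lock of its own, and these functions may be called from interrupt
 * context (hence also the GFP_ATOMIC allocations above).
 */
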
/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	unsigned long flags;
	dma_addr_t dma_addr;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	dma_addr = map_single(dev, ptr, size, dir);

	local_irq_restore(flags);

	return dma_addr;
}

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	unmap_single(dev, dma_addr, size, dir);

	local_irq_restore(flags);
}

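/*
 * Scatterlists are bounced entry by entry: each segment that needs it
 * gets its own safe buffer, so one dma_map_sg() call may allocate
 * several bounce buffers.
 */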
int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		struct page *page = sg->page;
		unsigned int offset = sg->offset;
		unsigned int length = sg->length;
		void *ptr = page_address(page) + offset;

		sg->dma_address =
			map_single(dev, ptr, length, dir);
	}

	local_irq_restore(flags);

	return nents;
}

void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		unmap_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
			enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	local_irq_save(flags);

	sync_single(dev, dma_addr, size, dir);

	local_irq_restore(flags);
}

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
			   enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	local_irq_save(flags);

	sync_single(dev, dma_addr, size, dir);

	local_irq_restore(flags);
}

void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}

void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}

static int
dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
		    unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

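/*
 * Example registration (hypothetical platform code - the machine check
 * and buffer sizes are illustrative only):
 *
 *	if (machine_is_my_board())
 *		dmabounce_register_dev(&pdev->dev, 512, 4096);
 *
 * This creates a pool of 512-byte and a pool of 4096-byte safe buffers
 * for the device; larger mappings fall back to dma_alloc_coherent().
 * Passing 0 for large_buffer_size skips the large pool.
 */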
int
dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		       unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		printk(KERN_ERR
			"Could not allocate dmabounce_device_info for %s\n",
			dev->bus_id);
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	/* the large pool is optional, but its fields are read unconditionally */
	device_info->large.pool = NULL;
	device_info->large.size = 0;

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
#endif

	list_add(&device_info->node, &dmabounce_devs);

	printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
		dev->bus_id, dev->bus->name);

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}

void
dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);

	if (!device_info) {
		printk(KERN_WARNING
			"%s: Never registered with dmabounce but attempting "
			"to unregister!\n", dev->bus_id);
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		printk(KERN_ERR
			"%s: Removing from dmabounce with pending buffers!\n",
			dev->bus_id);
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	print_alloc_stats(device_info);
	print_map_stats(device_info);
#endif

	list_del(&device_info->node);

	kfree(device_info);

	printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
		dev->bus_id, dev->bus->name);
}

EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(dma_map_sg);
EXPORT_SYMBOL(dma_unmap_sg);
EXPORT_SYMBOL(dma_sync_single_for_cpu);
EXPORT_SYMBOL(dma_sync_single_for_device);
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
EXPORT_SYMBOL(dma_sync_sg_for_device);
EXPORT_SYMBOL(dmabounce_register_dev);
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");