/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top, and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
 *  DMA windows, will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */
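
/*
 * Illustrative usage sketch (not part of the original file): a platform
 * that owns a device with a limited DMA window registers it with dmabounce
 * during setup and unregisters it on teardown.  The function names and the
 * pool sizes below are hypothetical values chosen only for the example.
 *
 *	static int example_setup(struct device *dev)
 *	{
 *		// small pool of 512-byte buffers, large pool of 4 KiB buffers
 *		return dmabounce_register_dev(dev, 512, 4096);
 *	}
 *
 *	static void example_teardown(struct device *dev)
 *	{
 *		dmabounce_unregister_dev(dev);
 *	}
 */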
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#undef STATS	/* define STATS to collect allocation/bounce statistics */
#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif
/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dma_pool	*pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};
struct dmabounce_device_info {
	struct list_head node;

	struct device *dev;
	struct dma_pool	*small_buffer_pool;
	struct dma_pool	*large_buffer_pool;
	struct list_head safe_buffers;
	unsigned long small_buffer_size, large_buffer_size;
#ifdef STATS
	unsigned long sbp_allocs;
	unsigned long lbp_allocs;
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
#endif
};
static LIST_HEAD(dmabounce_devs);
#ifdef STATS
static void print_alloc_stats(struct dmabounce_device_info *device_info)
{
	printk(KERN_INFO
		"%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
		device_info->dev->bus_id,
		device_info->sbp_allocs, device_info->lbp_allocs,
		device_info->total_allocs - device_info->sbp_allocs -
			device_info->lbp_allocs,
		device_info->total_allocs);
}
#endif
/* find the given device in the dmabounce device list */
static inline struct dmabounce_device_info *
find_dmabounce_dev(struct device *dev)
{
	struct dmabounce_device_info *d;

	list_for_each_entry(d, &dmabounce_devs, node)
		if (d->dev == dev)
			return d;

	return NULL;
}
/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dma_pool *pool;
	struct device *dev = device_info->dev;
	void *safe;
	dma_addr_t safe_dma_addr;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	DO_STATS ( device_info->total_allocs++ );

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	if (size <= device_info->small_buffer_size) {
		pool = device_info->small_buffer_pool;
		safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);

		DO_STATS ( device_info->sbp_allocs++ );
	} else if (size <= device_info->large_buffer_size) {
		pool = device_info->large_buffer_pool;
		safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);

		DO_STATS ( device_info->lbp_allocs++ );
	} else {
		pool = NULL;
		safe = dma_alloc_coherent(dev, size, &safe_dma_addr, GFP_ATOMIC);
	}

	if (safe == NULL) {
		dev_warn(device_info->dev,
			"%s: could not alloc dma memory (size=%d)\n",
			__func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (device_info->total_allocs % 1000 == 0)
		print_alloc_stats(device_info);
#endif

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;
	buf->safe = safe;
	buf->safe_dma_addr = safe_dma_addr;

	list_add(&buf->node, &device_info->safe_buffers);

	return buf;
}
/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b;

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr)
			return b;

	return NULL;
}
static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	list_del(&buf->node);

	if (buf->pool)
		dma_pool_free(buf->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				    buf->safe_dma_addr);

	kfree(buf);
}
/* ************************************************** */
#ifdef STATS
static void print_map_stats(struct dmabounce_device_info *device_info)
{
	printk(KERN_INFO
		"%s: dmabounce: map_op_count=%lu, bounce_count=%lu\n",
		device_info->dev->bus_id,
		device_info->map_op_count, device_info->bounce_count);
}
#endif
static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == NULL) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			return 0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		consistent_sync(buf->safe, size, dir);

		dma_addr = buf->safe_dma_addr;
	} else {
		consistent_sync(ptr, size, dir);
	}

	return dma_addr;
}
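
/*
 * Illustrative note (not part of the original file): a worked example of
 * the needs_bounce test in map_single() above.  Assume, purely for the
 * example, a device whose DMA mask is 0x03ffffff (a 64 MB window):
 *
 *	dma_addr                         = 0x03fff000, size = 0x2000
 *	dma_addr + size - 1              = 0x04000fff
 *	dma_addr | (dma_addr + size - 1) = 0x07ffffff
 *	0x07ffffff & ~0x03ffffff         = 0x04000000  (non-zero)
 *
 * The buffer straddles the end of the device's DMA window, so it must be
 * bounced; a buffer lying wholly below 0x04000000 gives needs_bounce == 0.
 */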
static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	struct safe_buffer *buf = NULL;

	/*
	 * Trying to unmap an invalid mapping
	 */
	if (dma_addr == ~0) {
		dev_err(dev, "Trying to unmap invalid mapping\n");
		return;
	}

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		BUG_ON(buf->size != size);

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			unsigned long ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			ptr = (unsigned long)buf->ptr;
			dmac_clean_range(ptr, ptr + size);
		}
		free_safe_buffer(device_info, buf);
	}
}
static inline void
sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	struct safe_buffer *buf = NULL;

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		/*
		 * Both of these checks from the original code need to be
		 * commented out because some drivers rely on the following:
		 *
		 * 1) Drivers may map a large chunk of memory into DMA space
		 *    but only sync a small portion of it. A good example is
		 *    allocating a large buffer, mapping it, and then
		 *    breaking it up into small descriptors. There is no point
		 *    in syncing the whole buffer if you only have to
		 *    touch one descriptor.
		 *
		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
		 *    usually only synced in one direction at a time.
		 *
		 * See drivers/net/eepro100.c for examples of both cases.
		 *
		 * BUG_ON(buf->size != size);
		 * BUG_ON(buf->direction != dir);
		 */

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		switch (dir) {
		case DMA_FROM_DEVICE:
			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);
			break;
		case DMA_TO_DEVICE:
			dev_dbg(dev,
				"%s: copy out unsafe %p to safe %p, size %d\n",
				__func__, buf->ptr, buf->safe, size);
			memcpy(buf->safe, buf->ptr, size);
			break;
		case DMA_BIDIRECTIONAL:
			BUG();	/* is this allowed?  what does it mean? */
		default:
			BUG();
		}
		consistent_sync(buf->safe, size, dir);
	} else {
		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
	}
}
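
/*
 * Illustrative sketch (not part of the original file) of case 1 from the
 * comment in sync_single() above: a driver maps one large buffer and then
 * syncs only the first descriptor it is about to touch.  The names ring_desc,
 * RING_BYTES and DESC_BYTES are hypothetical and exist only for the example.
 *
 *	dma_addr_t ring = dma_map_single(dev, ring_desc, RING_BYTES,
 *					 DMA_FROM_DEVICE);
 *
 *	// later: refresh just the first descriptor rather than the whole
 *	// ring; the size differs from the mapped size, which is why the
 *	// BUG_ON(buf->size != size) check above had to go
 *	dma_sync_single_for_cpu(dev, ring, DESC_BYTES, DMA_FROM_DEVICE);
 */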
/* ************************************************** */
/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long flags;
	dma_addr_t dma_addr;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	dma_addr = map_single(dev, ptr, size, dir);

	local_irq_restore(flags);

	return dma_addr;
}
/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	unmap_single(dev, dma_addr, size, dir);

	local_irq_restore(flags);
}
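
/*
 * Illustrative sketch (not part of the original file): a driver brackets a
 * device-to-memory transfer with the pair of calls above.  If rx_buf lies
 * outside the device's DMA window, map_single() silently substitutes a
 * bounce buffer and unmap_single() copies the data back.  rx_buf, RX_LEN
 * and start_rx_dma() are hypothetical names used only for the example.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, rx_buf, RX_LEN, DMA_FROM_DEVICE);
 *	start_rx_dma(dev, handle, RX_LEN);	// program the device
 *	... wait for the transfer to complete ...
 *	dma_unmap_single(dev, handle, RX_LEN, DMA_FROM_DEVICE);
 *	// rx_buf now holds the received data, bounced or not
 */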
int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		struct page *page = sg->page;
		unsigned int offset = sg->offset;
		unsigned int length = sg->length;
		void *ptr = page_address(page) + offset;

		sg->dma_address =
			map_single(dev, ptr, length, dir);
	}

	local_irq_restore(flags);

	return nents;
}
void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		unmap_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}
void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	local_irq_save(flags);

	sync_single(dev, dma_addr, size, dir);

	local_irq_restore(flags);
}
void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	local_irq_save(flags);

	sync_single(dev, dma_addr, size, dir);

	local_irq_restore(flags);
}
void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}
void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}
int
dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		printk(KERN_ERR
			"Could not allocate dmabounce_device_info for %s\n",
			dev->bus_id);
		return -ENOMEM;
	}

	device_info->small_buffer_pool =
		dma_pool_create("small_dmabounce_pool",
				dev,
				small_buffer_size,
				0 /* byte alignment */,
				0 /* no page-crossing issues */);
	if (!device_info->small_buffer_pool) {
		printk(KERN_ERR
			"dmabounce: could not allocate small DMA pool for %s\n",
			dev->bus_id);
		kfree(device_info);
		return -ENOMEM;
	}

	device_info->large_buffer_pool = NULL;	/* stays NULL if no large pool is requested */
	if (large_buffer_size) {
		device_info->large_buffer_pool =
			dma_pool_create("large_dmabounce_pool",
					dev,
					large_buffer_size,
					0 /* byte alignment */,
					0 /* no page-crossing issues */);
		if (!device_info->large_buffer_pool) {
			printk(KERN_ERR
				"dmabounce: could not allocate large DMA pool for %s\n",
				dev->bus_id);
			dma_pool_destroy(device_info->small_buffer_pool);
			kfree(device_info);
			return -ENOMEM;
		}
	}

	device_info->dev = dev;
	device_info->small_buffer_size = small_buffer_size;
	device_info->large_buffer_size = large_buffer_size;
	INIT_LIST_HEAD(&device_info->safe_buffers);

#ifdef STATS
	device_info->sbp_allocs = 0;
	device_info->lbp_allocs = 0;
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
#endif

	list_add(&device_info->node, &dmabounce_devs);

	printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
		dev->bus_id, dev->bus->name);

	return 0;
}
void
dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);

	if (!device_info) {
		printk(KERN_WARNING
			"%s: Never registered with dmabounce but attempting "
			"to unregister!\n", dev->bus_id);
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		printk(KERN_ERR
			"%s: Removing from dmabounce with pending buffers!\n",
			dev->bus_id);
		BUG();
	}

	if (device_info->small_buffer_pool)
		dma_pool_destroy(device_info->small_buffer_pool);
	if (device_info->large_buffer_pool)
		dma_pool_destroy(device_info->large_buffer_pool);

#ifdef STATS
	print_alloc_stats(device_info);
	print_map_stats(device_info);
#endif

	list_del(&device_info->node);

	kfree(device_info);

	printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
		dev->bus_id, dev->bus->name);
}
EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(dma_map_sg);
EXPORT_SYMBOL(dma_unmap_sg);
EXPORT_SYMBOL(dma_sync_single);
EXPORT_SYMBOL(dma_sync_sg);
EXPORT_SYMBOL(dmabounce_register_dev);
EXPORT_SYMBOL(dmabounce_unregister_dev);
MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");