/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top, and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
 *  DMA windows, will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */
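
/*
 * Typical usage (illustrative sketch, not part of the original file):
 * platform or bus support code registers a device that can only reach a
 * limited DMA window, and drivers then use the standard DMA API
 * unchanged; bouncing happens transparently inside dma_map_single() and
 * friends. The pool sizes below are made-up example values.
 *
 *	if (dmabounce_register_dev(dev, 512, 4096))	// small/large pool sizes
 *		dev_err(dev, "could not register with dmabounce\n");
 *	...
 *	dmabounce_unregister_dev(dev);			// on device removal
 */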

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>

#include <asm/cacheflush.h>

#undef DEBUG

#undef STATS
#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif
/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dma_pool	*pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_device_info {
	struct list_head node;

	struct device *dev;
	struct dma_pool	*small_buffer_pool;
	struct dma_pool	*large_buffer_pool;
	struct list_head safe_buffers;
	unsigned long small_buffer_size, large_buffer_size;
#ifdef STATS
	unsigned long sbp_allocs;
	unsigned long lbp_allocs;
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
#endif
};

static LIST_HEAD(dmabounce_devs);
#ifdef STATS
static void print_alloc_stats(struct dmabounce_device_info *device_info)
{
	printk(KERN_INFO
		"%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
		device_info->dev->bus_id,
		device_info->sbp_allocs, device_info->lbp_allocs,
		device_info->total_allocs - device_info->sbp_allocs -
			device_info->lbp_allocs,
		device_info->total_allocs);
}
#endif
/* find the given device in the dmabounce device list */
static inline struct dmabounce_device_info *
find_dmabounce_dev(struct device *dev)
{
	struct list_head *entry;

	list_for_each(entry, &dmabounce_devs) {
		struct dmabounce_device_info *d =
			list_entry(entry, struct dmabounce_device_info, node);

		if (d->dev == dev)
			return d;
	}
	return NULL;
}
/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dma_pool *pool;
	struct device *dev = device_info->dev;
	void *safe;
	dma_addr_t safe_dma_addr;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	DO_STATS ( device_info->total_allocs++ );

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	if (size <= device_info->small_buffer_size) {
		pool = device_info->small_buffer_pool;
		safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);

		DO_STATS ( device_info->sbp_allocs++ );
	} else if (size <= device_info->large_buffer_size) {
		pool = device_info->large_buffer_pool;
		safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);

		DO_STATS ( device_info->lbp_allocs++ );
	} else {
		pool = NULL;
		safe = dma_alloc_coherent(dev, size, &safe_dma_addr, GFP_ATOMIC);
	}

	if (safe == NULL) {
		dev_warn(device_info->dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (device_info->total_allocs % 1000 == 0)
		print_alloc_stats(device_info);
#endif

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;
	buf->safe = safe;
	buf->safe_dma_addr = safe_dma_addr;

	list_add(&buf->node, &device_info->safe_buffers);

	return buf;
}
/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct list_head *entry;

	list_for_each(entry, &device_info->safe_buffers) {
		struct safe_buffer *b =
			list_entry(entry, struct safe_buffer, node);

		if (b->safe_dma_addr == safe_dma_addr)
			return b;
	}

	return NULL;
}
static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	list_del(&buf->node);

	if (buf->pool)
		dma_pool_free(buf->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}
/* ************************************************** */

#ifdef STATS
static void print_map_stats(struct dmabounce_device_info *device_info)
{
	printk(KERN_INFO
		"%s: dmabounce: map_op_count=%lu, bounce_count=%lu\n",
		device_info->dev->bus_id,
		device_info->map_op_count, device_info->bounce_count);
}
#endif
static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
	   enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == NULL) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			return 0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		consistent_sync(buf->safe, size, dir);

		dma_addr = buf->safe_dma_addr;
	} else {
		consistent_sync(ptr, size, dir);
	}

	return dma_addr;
}
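
/*
 * Note on dma_needs_bounce(): besides the generic mask check above,
 * map_single() asks the platform whether a given bus address range needs
 * bouncing. An illustrative, hypothetical platform implementation for a
 * machine whose device can only reach the first 1MB of its DMA window
 * might look like the sketch below; the window limit is a made-up value
 * and the real hook is provided by the platform, not by this file.
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > 0x100000;	// beyond reachable window
 *	}
 */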
static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	     enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	struct safe_buffer *buf = NULL;

	/*
	 * Trying to unmap an invalid mapping
	 */
	if (dma_addr == ~0) {
		dev_err(dev, "Trying to unmap invalid mapping\n");
		return;
	}

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		BUG_ON(buf->size != size);

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			unsigned long ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM. Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			ptr = (unsigned long)buf->ptr;
			dmac_clean_range(ptr, ptr + size);
		}
		free_safe_buffer(device_info, buf);
	}
}
static inline void
sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	    enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	struct safe_buffer *buf = NULL;

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		/*
		 * Both of these checks from original code need to be
		 * commented out b/c some drivers rely on the following:
		 *
		 * 1) Drivers may map a large chunk of memory into DMA space
		 *    but only sync a small portion of it. Good example is
		 *    allocating a large buffer, mapping it, and then
		 *    breaking it up into small descriptors. No point
		 *    in syncing the whole buffer if you only have to
		 *    touch one descriptor.
		 *
		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
		 *    usually only synced in one dir at a time.
		 *
		 * See drivers/net/eepro100.c for examples of both cases.
		 *
		 * BUG_ON(buf->size != size);
		 * BUG_ON(buf->direction != dir);
		 */

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		switch (dir) {
		case DMA_FROM_DEVICE:
			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);
			break;
		case DMA_TO_DEVICE:
			dev_dbg(dev,
				"%s: copy out unsafe %p to safe %p, size %d\n",
				__func__, buf->ptr, buf->safe, size);
			memcpy(buf->safe, buf->ptr, size);
			break;
		case DMA_BIDIRECTIONAL:
			BUG();	/* is this allowed?  what does it mean? */
		default:
			BUG();
		}
		consistent_sync(buf->safe, size, dir);
	} else {
		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
	}
}
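
/*
 * Illustration of case 1) from the comment above (hypothetical driver
 * code, not part of this file): a driver maps one large buffer and then
 * syncs only the descriptor it is about to touch, which is why
 * sync_single() must not insist that the sync size match the original
 * mapping size. RING_BYTES, ring_virt, desc_offset and struct ring_desc
 * are made-up names for the example.
 *
 *	dma_addr_t ring = dma_map_single(dev, ring_virt, RING_BYTES,
 *					 DMA_BIDIRECTIONAL);
 *	...
 *	dma_sync_single_for_cpu(dev, ring + desc_offset,
 *				sizeof(struct ring_desc), DMA_FROM_DEVICE);
 */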
/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	unsigned long flags;
	dma_addr_t dma_addr;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	dma_addr = map_single(dev, ptr, size, dir);

	local_irq_restore(flags);

	return dma_addr;
}

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	unmap_single(dev, dma_addr, size, dir);

	local_irq_restore(flags);
}
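
/*
 * From a driver's point of view nothing changes (illustrative sketch,
 * with hypothetical buffer names): the bounce is hidden behind the usual
 * streaming DMA calls. If the buffer lies outside the device's DMA
 * window, the data is copied into a safe buffer on map and the safe
 * buffer's bus address is returned instead.
 *
 *	dma_addr_t handle = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
 *	// ... start the hardware on 'handle' ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */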
int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		struct page *page = sg->page;
		unsigned int offset = sg->offset;
		unsigned int length = sg->length;
		void *ptr = page_address(page) + offset;

		sg->dma_address =
			map_single(dev, ptr, length, dir);
	}

	local_irq_restore(flags);

	return nents;
}
void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		unmap_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
			enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	local_irq_save(flags);

	sync_single(dev, dma_addr, size, dir);

	local_irq_restore(flags);
}

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
			   enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	local_irq_save(flags);

	sync_single(dev, dma_addr, size, dir);

	local_irq_restore(flags);
}
void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}

void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}
int
dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		       unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		printk(KERN_ERR
			"Could not allocate dmabounce_device_info for %s",
			dev->bus_id);
		return -ENOMEM;
	}

	device_info->small_buffer_pool =
		dma_pool_create("small_dmabounce_pool",
				dev,
				small_buffer_size,
				0 /* byte alignment */,
				0 /* no page-crossing issues */);
	if (!device_info->small_buffer_pool) {
		printk(KERN_ERR
			"dmabounce: could not allocate small DMA pool for %s\n",
			dev->bus_id);
		kfree(device_info);
		return -ENOMEM;
	}

	if (large_buffer_size) {
		device_info->large_buffer_pool =
			dma_pool_create("large_dmabounce_pool",
					dev,
					large_buffer_size,
					0 /* byte alignment */,
					0 /* no page-crossing issues */);
		if (!device_info->large_buffer_pool) {
			printk(KERN_ERR
				"dmabounce: could not allocate large DMA pool for %s\n",
				dev->bus_id);
			dma_pool_destroy(device_info->small_buffer_pool);
			kfree(device_info);
			return -ENOMEM;
		}
	}

	device_info->dev = dev;
	device_info->small_buffer_size = small_buffer_size;
	device_info->large_buffer_size = large_buffer_size;
	INIT_LIST_HEAD(&device_info->safe_buffers);

#ifdef STATS
	device_info->sbp_allocs = 0;
	device_info->lbp_allocs = 0;
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
#endif

	list_add(&device_info->node, &dmabounce_devs);

	printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
		dev->bus_id, dev->bus->name);

	return 0;
}
void
dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);

	if (!device_info) {
		printk(KERN_WARNING
			"%s: Never registered with dmabounce but attempting "
			"to unregister!\n", dev->bus_id);
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		printk(KERN_ERR
			"%s: Removing from dmabounce with pending buffers!\n",
			dev->bus_id);
		BUG();
	}

	if (device_info->small_buffer_pool)
		dma_pool_destroy(device_info->small_buffer_pool);
	if (device_info->large_buffer_pool)
		dma_pool_destroy(device_info->large_buffer_pool);

#ifdef STATS
	print_alloc_stats(device_info);
	print_map_stats(device_info);
#endif

	list_del(&device_info->node);

	kfree(device_info);

	printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
		dev->bus_id, dev->bus->name);
}
EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(dma_map_sg);
EXPORT_SYMBOL(dma_unmap_sg);
EXPORT_SYMBOL(dma_sync_single);
EXPORT_SYMBOL(dma_sync_sg);
EXPORT_SYMBOL(dmabounce_register_dev);
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");