Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _ASM_IA64_DMA_MAPPING_H |
2 | #define _ASM_IA64_DMA_MAPPING_H | |
3 | ||
4 | /* | |
5 | * Copyright (C) 2003-2004 Hewlett-Packard Co | |
6 | * David Mosberger-Tang <davidm@hpl.hp.com> | |
7 | */ | |
8 | #include <linux/config.h> | |
9 | #include <asm/machvec.h> | |
10 | ||
/*
 * On ia64 the generic DMA API is routed through the machine vector
 * (see <asm/machvec.h>): each platform supplies its own platform_dma_*
 * entry points, and these aliases simply forward the generic names to
 * them.
 */
#define dma_alloc_coherent	platform_dma_alloc_coherent
#define dma_alloc_noncoherent	platform_dma_alloc_coherent	/* coherent mem. is cheap */
#define dma_free_coherent	platform_dma_free_coherent
#define dma_free_noncoherent	platform_dma_free_coherent
#define dma_map_single		platform_dma_map_single
#define dma_map_sg		platform_dma_map_sg
#define dma_unmap_single	platform_dma_unmap_single
#define dma_unmap_sg		platform_dma_unmap_sg
#define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
#define dma_sync_sg_for_cpu	platform_dma_sync_sg_for_cpu
#define dma_sync_single_for_device	platform_dma_sync_single_for_device
#define dma_sync_sg_for_device	platform_dma_sync_sg_for_device
#define dma_mapping_error	platform_dma_mapping_error
24 | ||
/*
 * Page-level map/unmap, implemented on top of the single-buffer
 * primitives via page_address().
 * NOTE(review): this assumes the page has a kernel virtual address
 * (no highmem) — true on ia64, but worth confirming if ported.
 *
 * Every macro argument is parenthesized at its point of use so that
 * expression arguments (e.g. "base + hdr_len") expand correctly.
 */
#define dma_map_page(dev, pg, off, size, dir)				\
	dma_map_single((dev), page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir)			\
	dma_unmap_single((dev), (dma_addr), (size), (dir))
29 | ||
30 | /* | |
31 | * Rest of this file is part of the "Advanced DMA API". Use at your own risk. | |
32 | * See Documentation/DMA-API.txt for details. | |
33 | */ | |
34 | ||
/*
 * Ranged sync operations.
 * NOTE(review): 'offset' is dropped entirely — neither added to the
 * handle nor to the size — so only [handle, handle+size) is passed to
 * the platform op rather than [handle+offset, handle+offset+size).
 * Presumably harmless because ia64 is cache-coherent and the platform
 * sync hooks are (mostly) no-ops, but confirm before reusing this
 * pattern elsewhere.
 */
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)

/* Mask-support query is also delegated to the machine vector. */
#define dma_supported		platform_dma_supported
41 | ||
42 | static inline int | |
43 | dma_set_mask (struct device *dev, u64 mask) | |
44 | { | |
45 | if (!dev->dma_mask || !dma_supported(dev, mask)) | |
46 | return -EIO; | |
47 | *dev->dma_mask = mask; | |
48 | return 0; | |
49 | } | |
50 | ||
/*
 * Report the alignment DMA buffers should honor to avoid sharing a
 * cache line with unrelated data.
 */
static inline int
dma_get_cache_alignment (void)
{
	/* Defined in arch code; the largest cache-line size on the system. */
	extern int ia64_max_cacheline_size;
	int alignment = ia64_max_cacheline_size;

	return alignment;
}
57 | ||
static inline void
dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
	 * ensure that dma_cache_sync() enforces order, hence the mb().
	 * 'vaddr', 'size' and 'dir' are deliberately unused: no cache
	 * flushing is required on this architecture.
	 */
	mb();
}
67 | ||
/* Every DMA allocation on ia64 is coherent, so any handle is "consistent". */
#define dma_is_consistent(dma_handle)	(1)	/* all we do is coherent memory... */
69 | ||
70 | #endif /* _ASM_IA64_DMA_MAPPING_H */ |