#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <linux/device.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

/*
 * See Documentation/DMA-API.txt for the description of how the
 * following DMA API should work.
 */

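/* Note: there is no separate non-coherent allocator on this platform;
 * the "noncoherent" calls below simply alias the coherent ones. */
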
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern unsigned long __nongprelbss dma_coherent_mem_start;
extern unsigned long __nongprelbss dma_coherent_mem_end;

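/* Allocate/free a consistent buffer: the returned virtual address is
 * usable by the CPU, and *dma_handle by the device. */
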
void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);

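/*
 * Illustrative usage only (not part of the original header):
 *
 *	dma_addr_t bus;
 *	void *vaddr = dma_alloc_coherent(dev, size, &bus, GFP_KERNEL);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	... use the buffer ...
 *	dma_free_coherent(dev, size, vaddr, bus);
 */
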
extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
				 enum dma_data_direction direction);

static inline
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

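/*
 * Illustrative usage only (not part of the original header):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... let the device perform the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
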
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction direction);

static inline
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

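/*
 * Illustrative scatter-gather usage (not part of the original header);
 * note that dma_unmap_sg() takes the original entry count, not the
 * count returned by dma_map_sg():
 *
 *	int count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	... program the device with count elements ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */
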
extern
dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
			size_t size, enum dma_data_direction direction);

static inline
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

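/*
 * Sync operations: nothing needs to be done here to hand a buffer back
 * to the CPU, while syncing for the device flushes the CPU write
 * buffers so that queued writes become visible to the device.
 */
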
static inline
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			     enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
				enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
			 enum dma_data_direction direction)
{
}

static inline
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
			    enum dma_data_direction direction)
{
	flush_write_buffers();
}

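/* This implementation never fails to produce a mapping, so there is no
 * error cookie to check for. */
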
static inline
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

static inline
int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA allows.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

static inline
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

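/*
 * Illustrative usage only (not part of the original header): a driver
 * for a device limited to 24-bit addressing might do
 *
 *	if (dma_set_mask(dev, 0x00ffffffULL))
 *		goto no_dma;
 */
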
static inline
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
	flush_write_buffers();
}

/* Not supported for now */
static inline int dma_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma, void *cpu_addr,
				    dma_addr_t dma_addr, size_t size)
{
	return -EINVAL;
}

static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
				  void *cpu_addr, dma_addr_t dma_addr,
				  size_t size)
{
	return -EINVAL;
}

#endif /* _ASM_DMA_MAPPING_H */