Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef __ASM_SH_DMA_MAPPING_H |
2 | #define __ASM_SH_DMA_MAPPING_H | |
3 | ||
1da177e4 LT |
4 | #include <linux/mm.h> |
5 | #include <asm/scatterlist.h> | |
6 | #include <asm/io.h> | |
7 | ||
/* Forward declaration: the low-level consistent allocator is keyed on a
 * PCI device pointer (callers in this header always pass NULL). */
struct pci_dev;

/*
 * Allocate/free "consistent" (DMA-coherent) memory.  Implemented
 * elsewhere in the arch; returns a kernel virtual address and fills
 * *dma_handle with the matching bus address.
 */
extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
			      dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
			    void *vaddr, dma_addr_t dma_handle);

/* All DMA masks are accepted on this port. */
#define dma_supported(dev, mask) (1)
16 | static inline int dma_set_mask(struct device *dev, u64 mask) | |
17 | { | |
18 | if (!dev->dma_mask || !dma_supported(dev, mask)) | |
19 | return -EIO; | |
20 | ||
21 | *dev->dma_mask = mask; | |
22 | ||
23 | return 0; | |
24 | } | |
25 | ||
26 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, | |
970a9e73 | 27 | dma_addr_t *dma_handle, gfp_t flag) |
1da177e4 LT |
28 | { |
29 | return consistent_alloc(NULL, size, dma_handle); | |
30 | } | |
31 | ||
/*
 * Release memory obtained from dma_alloc_coherent().  @dev is unused;
 * the underlying consistent_free() is called with a NULL pci_dev,
 * mirroring the allocation path.
 */
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	consistent_free(NULL, size, vaddr, dma_handle);
}
37 | ||
/*
 * Bring the CPU cache and memory into agreement for @vaddr/@size.
 * Unconditionally writes back and invalidates: @dir is accepted only
 * for API compatibility and does not select a cheaper operation.
 */
static inline void dma_cache_sync(void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	dma_cache_wback_inv((unsigned long)vaddr, size);
}
43 | ||
/*
 * Map a single buffer for DMA and return its bus address.
 *
 * When PCI DMA is coherent on this platform (CONFIG_PCI set and
 * CONFIG_SH_PCIDMA_NONCOHERENT unset), devices on the PCI bus skip
 * the cache writeback/invalidate; every other case syncs the CPU
 * cache before handing the address to the device.
 */
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_bus(ptr);
#endif
	dma_cache_sync(ptr, size, dir);

	return virt_to_bus(ptr);
}
56 | ||
/* Unmapping is a no-op: single mappings hold no state on this port. */
#define dma_unmap_single(dev, addr, size, dir) do { } while (0)
58 | ||
59 | static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, | |
60 | int nents, enum dma_data_direction dir) | |
61 | { | |
62 | int i; | |
63 | ||
64 | for (i = 0; i < nents; i++) { | |
65 | #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) | |
66 | dma_cache_sync(page_address(sg[i].page) + sg[i].offset, | |
67 | sg[i].length, dir); | |
68 | #endif | |
69 | sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; | |
70 | } | |
71 | ||
72 | return nents; | |
73 | } | |
74 | ||
/* Unmapping is a no-op: scatterlist mappings hold no state on this port. */
#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0)
76 | ||
/*
 * Map @size bytes at @offset within @page by reusing the
 * single-buffer path on the page's kernel virtual address.
 * NOTE(review): page_address() implies the page is directly mapped —
 * no highmem handling here; confirm highmem is not configured.
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}
83 | ||
/* Page unmapping delegates to dma_unmap_single(), which is a no-op. */
static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}
89 | ||
/*
 * Synchronize CPU caches for a mapped region.  Coherent PCI devices
 * need no cache maintenance and return early; otherwise the kernel
 * virtual alias of @dma_handle is written back and invalidated.
 */
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle), size, dir);
}
99 | ||
/*
 * Like dma_sync_single(), but operates on a sub-range starting
 * @offset bytes into the mapping.  Coherent PCI devices skip the
 * cache maintenance entirely.
 */
static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
}
111 | ||
112 | static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg, | |
113 | int nelems, enum dma_data_direction dir) | |
114 | { | |
115 | int i; | |
116 | ||
117 | for (i = 0; i < nelems; i++) { | |
118 | #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) | |
119 | dma_cache_sync(page_address(sg[i].page) + sg[i].offset, | |
120 | sg[i].length, dir); | |
121 | #endif | |
122 | sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; | |
123 | } | |
124 | } | |
125 | ||
/* CPU-ownership sync: both directions perform the same cache flush
 * via dma_sync_single(). */
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}
1da177e4 LT |
132 | |
/* Device-ownership sync: identical to the for_cpu case on this port —
 * dma_sync_single() always writes back and invalidates. */
static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle, size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}
1da177e4 LT |
139 | |
/* Scatterlist CPU-ownership sync: delegates to dma_sync_sg(). */
static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}
1da177e4 LT |
146 | |
/* Scatterlist device-ownership sync: same operation as the for_cpu
 * variant on this port — delegates to dma_sync_sg(). */
static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}
1da177e4 LT |
153 | |
/* Report the minimum DMA buffer alignment: one L1 cache line. */
static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}
162 | ||
/*
 * Bus address 0 is the in-band error sentinel for this port.
 * NOTE(review): presumes the map functions never yield 0 for a valid
 * mapping — confirm against the platform's virt_to_bus() range.
 */
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}
167 | ||
168 | #endif /* __ASM_SH_DMA_MAPPING_H */ | |
169 |