#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}
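/*
 * Note: 32-bit kernels always use the single global dma_map_ops table,
 * while on x86-64 a device may carry its own operations in
 * dev->archdata.dma_ops (set up, for example, by an IOMMU driver);
 * get_dma_ops() falls back to the global table when none is set.
 */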

#include <asm-generic/dma-mapping-common.h>

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}
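/*
 * Illustrative use (not part of this header): every handle produced by the
 * mapping helpers above should be checked before it is handed to hardware,
 * e.g.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */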

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return 0;

	return addr + size <= *dev->dma_mask;
}
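/*
 * dma_capable() reports whether [addr, addr + size) fits under the device's
 * streaming DMA mask; a device with no dma_mask set is treated as unable to
 * do DMA at all.
 */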

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}
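/*
 * On x86 the bus address seen by a device equals the CPU physical address,
 * so phys_to_dma()/dma_to_phys() are identity conversions here; other
 * architectures may apply an offset or translation.
 */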

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction dir)
{
	flush_write_buffers();
}
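/*
 * x86 is cache-coherent with respect to DMA (dma_is_consistent() above is
 * always 1), so syncing a "non-coherent" buffer only requires flushing the
 * CPU write buffers; no cache lines need to be invalidated or written back.
 */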

static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_BIT_MASK(24))
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}
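/*
 * The coherent mask picks the allocation zone: a mask of 24 bits or less
 * forces GFP_DMA (ISA-reachable memory), and on x86-64 anything up to
 * 32 bits gets GFP_DMA32. For example, a device with coherent_dma_mask =
 * DMA_BIT_MASK(30) ends up allocating from ZONE_DMA32 on a 64-bit kernel.
 */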

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	memory = ops->alloc_coherent(dev, size, dma_handle,
				     dma_alloc_coherent_gfp_flags(dev, gfp));
	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

	return memory;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, bus);
	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}
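/*
 * Illustrative pairing (not part of this header): a coherent buffer is
 * typically allocated once at setup time and released with the same size
 * and bus handle, e.g.
 *
 *	void *cpu;
 *	dma_addr_t bus;
 *
 *	cpu = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, cpu, bus);
 */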

#endif