/* arch/x86/kernel/pci-dma.c */
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole of physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment to
 * guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;

/*
 * Group multi-function PCI devices into a single device-group for the
 * iommu_device_group interface. This tells the iommu driver to pretend
 * it cannot distinguish between functions of a device, exposing only one
 * group for the device. Useful for disallowing use of individual PCI
 * functions from userspace drivers.
 */
int iommu_group_mf __read_mostly;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);
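
/*
 * Illustrative note (an assumption about the x86 DMA glue of this era,
 * not code from this file): the dma-mapping helpers substitute this
 * fallback device when a caller passes a NULL struct device, roughly:
 *
 *	if (!dev)
 *		dev = &x86_dma_fallback_dev;
 *
 * so legacy ISA-style callers still see a valid coherent_dma_mask.
 */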

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       32768

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
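
/*
 * Usage sketch (illustrative, not part of this file): a PCI driver
 * typically negotiates its DMA mask at probe time, falling back from
 * 64-bit to 32-bit addressing:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */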

void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
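
/*
 * Illustrative note (assumption, based on <asm/iommu_table.h> of this
 * era): the entries walked above are registered at build time with the
 * IOMMU_INIT*() macros, which place a struct iommu_table_entry into the
 * __iommu_table section; the swiotlb registration looks roughly like:
 *
 *	IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
 *			  pci_xen_swiotlb_detect,
 *			  pci_swiotlb_init, pci_swiotlb_late_init);
 */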
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	unsigned long dma_mask;
	struct page *page = NULL;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	/* Try CMA first; it may sleep, so skip it for atomic requests. */
	if (!(flag & GFP_ATOMIC))
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		/* Retry once from ZONE_DMA if the mask was not satisfied. */
		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}
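
/*
 * Usage sketch (illustrative, not part of this file): drivers reach this
 * allocator through the generic DMA API rather than calling it directly:
 *
 *	dma_addr_t bus_addr;
 *	void *cpu_addr = dma_alloc_coherent(&pdev->dev, 4096, &bus_addr,
 *					    GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 */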

void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_addr, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = virt_to_page(vaddr);

	if (!dma_release_from_contiguous(dev, page, count))
		free_pages((unsigned long)vaddr, get_order(size));
}

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;
		if (!strncmp(p, "group_mf", 8))
			iommu_group_mf = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
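
/*
 * Example (illustrative): the loop above walks a comma-separated list,
 * so a single command-line option can combine several flags, e.g.:
 *
 *	iommu=pt,nopanic
 *
 * which sets iommu_pass_through = 1 and panic_on_overflow = 0.
 */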

int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
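
/*
 * Terminology note (not from this file): SAC is PCI single-address-cycle
 * (32-bit) addressing, DAC is dual-address-cycle (64-bit). Returning 0
 * for a wide mask here makes dma_set_mask() fail, steering the driver
 * back to a 32-bit mask.
 */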

static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);
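
/*
 * Ordering note (assumption about initcall levels): rootfs_initcall()
 * runs after subsys_initcall(), where the PCI core registers
 * pci_bus_type, and before device-level initcalls, which is what the
 * "Must execute after PCI subsystem" requirement above relies on.
 */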

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif