x86/asm/64: Rename thread_struct's fs and gs to fsbase and gsbase
[deliverable/linux.git] / arch / x86 / kernel / pci-nommu.c
CommitLineData
1da177e4
LT
1/* Fallback functions when the main IOMMU code is not compiled in. This
2 code is roughly equivalent to i386. */
8fa3d6fc 3#include <linux/dma-mapping.h>
b922f53b 4#include <linux/scatterlist.h>
1894e367 5#include <linux/string.h>
5a0e3ad6 6#include <linux/gfp.h>
1894e367
JSR
7#include <linux/pci.h>
8#include <linux/mm.h>
8fa3d6fc 9
1da177e4 10#include <asm/processor.h>
1894e367 11#include <asm/iommu.h>
17a941d8 12#include <asm/dma.h>
1da177e4 13
17a941d8
MBY
14static int
15check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
1da177e4 16{
a4c2baa6 17 if (hwdev && !dma_capable(hwdev, bus, size)) {
284901a9 18 if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
f0fdabf8 19 printk(KERN_ERR
8fa3d6fc
AM
20 "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
21 name, (long long)bus, size,
22 (long long)*hwdev->dma_mask);
17a941d8 23 return 0;
1da177e4 24 }
17a941d8
MBY
25 return 1;
26}
1da177e4 27
33feffd4
FT
28static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
29 unsigned long offset, size_t size,
30 enum dma_data_direction dir,
31 struct dma_attrs *attrs)
17a941d8 32{
33feffd4 33 dma_addr_t bus = page_to_phys(page) + offset;
5b3e5b72 34 WARN_ON(size == 0);
33feffd4 35 if (!check_addr("map_single", dev, bus, size))
8fd524b3 36 return DMA_ERROR_CODE;
e4dcdd6b 37 flush_write_buffers();
17a941d8 38 return bus;
1da177e4 39}
1da177e4 40
17a941d8
MBY
/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
 * above pci_map_single interface. Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length. They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements.
 * (for example via virtual mapping capabilities)
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 *
 * Returns 0 if any segment exceeds the device's DMA mask; in that
 * case segments processed before the failure are left populated.
 */
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));
		/* No IOMMU: the bus address is the segment's physical address. */
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;
		s->dma_length = s->length;
	}
	/* Make sure CPU writes are visible before the device reads. */
	flush_write_buffers();
	return nents;
}
1da177e4 75
a8ad568d
AB
/*
 * Hand a single mapping back to the device: with no IOMMU there is no
 * remapping or bounce buffering to do, only drain the CPU write
 * buffers so the device sees all prior stores.
 */
static void nommu_sync_single_for_device(struct device *dev,
			dma_addr_t addr, size_t size,
			enum dma_data_direction dir)
{
	flush_write_buffers();
}
82
83
/*
 * Scatter-gather variant of the device-direction sync: same as the
 * single-buffer case, only a write-buffer flush is required.
 */
static void nommu_sync_sg_for_device(struct device *dev,
			struct scatterlist *sg, int nelems,
			enum dma_data_direction dir)
{
	flush_write_buffers();
}
90
160c1d8e 91struct dma_map_ops nommu_dma_ops = {
baa676fc 92 .alloc = dma_generic_alloc_coherent,
0a2b9a6e 93 .free = dma_generic_free_coherent,
a8ad568d
AB
94 .map_sg = nommu_map_sg,
95 .map_page = nommu_map_page,
96 .sync_single_for_device = nommu_sync_single_for_device,
97 .sync_sg_for_device = nommu_sync_sg_for_device,
98 .is_phys = 1,
17a941d8 99};
This page took 0.740342 seconds and 5 git commands to generate.