x86: move dma_map_page and dma_unmap_page to common header
[deliverable/linux.git] / arch/x86/kernel/pci-swiotlb_64.c
/* Glue code to lib/swiotlb.c */

#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include <asm/gart.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>

int swiotlb __read_mostly;

const struct dma_mapping_ops swiotlb_dma_ops = {
	.mapping_error = swiotlb_dma_mapping_error,
	.alloc_coherent = swiotlb_alloc_coherent,
	.free_coherent = swiotlb_free_coherent,
	.map_single = swiotlb_map_single,
	.unmap_single = swiotlb_unmap_single,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.map_sg = swiotlb_map_sg,
	.unmap_sg = swiotlb_unmap_sg,
	.dma_supported = NULL,
};

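The commit title refers to moving dma_map_page() and dma_unmap_page() out of the 64-bit-only header into the common x86 dma-mapping header, where they route through the same dma_ops indirection that swiotlb_dma_ops plugs into above. The header itself is not part of this file; the following is only a minimal sketch of what such common-header wrappers can look like, assuming they are layered on dma_map_single()/dma_unmap_single():

#include <linux/dma-mapping.h>
#include <linux/mm.h>		/* page_address() */

/* Sketch of the common-header wrappers (assumed form, not this file). */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	/* Mapping a page is just a single mapping of the page's kernel
	 * address plus the offset into it. */
	return dma_map_single(dev, page_address(page) + offset, size,
			      direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}

Because these wrappers only call through dma_ops, they work unchanged whether the registered implementation is swiotlb, a hardware IOMMU, or the nommu fallback.
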
void __init pci_swiotlb_init(void)
{
	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
	if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
		swiotlb = 1;
	if (swiotlb_force)
		swiotlb = 1;
	if (swiotlb) {
		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
		swiotlb_init();
		dma_ops = &swiotlb_dma_ops;
	}
}
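
Once pci_swiotlb_init() has pointed dma_ops at swiotlb_dma_ops, ordinary driver code reaches the swiotlb_* hooks without knowing that bounce buffering is in effect. A hypothetical streaming-DMA snippet for illustration (example_start_tx/example_finish_tx, dev, buf, and len are made up; the single-argument dma_mapping_error() matches this era, later kernels also take the struct device):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical helper: map a driver buffer for a device-bound transfer. */
static int example_start_tx(struct device *dev, void *buf, size_t len,
			    dma_addr_t *bus_addr)
{
	*bus_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(*bus_addr))	/* routed to swiotlb_dma_mapping_error() */
		return -EIO;
	/* ... program the device with *bus_addr and start the transfer ... */
	return 0;
}

/* Hypothetical counterpart: tear down the mapping when the transfer is done. */
static void example_finish_tx(struct device *dev, dma_addr_t bus_addr,
			      size_t len)
{
	dma_unmap_single(dev, bus_addr, len, DMA_TO_DEVICE);
}

If the buffer lies above what the device can address, the swiotlb map_single hook transparently copies it into the low bounce-buffer pool and hands the device the bounce buffer's bus address instead.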