perf_counter: x86: Expose INV and EDGE bits
[deliverable/linux.git] / arch / x86 / kernel / pci-swiotlb.c
CommitLineData
17a941d8
MBY
1/* Glue code to lib/swiotlb.c */
2
3#include <linux/pci.h>
4#include <linux/cache.h>
5#include <linux/module.h>
8ce79960
JF
6#include <linux/swiotlb.h>
7#include <linux/bootmem.h>
d6bd3a39
REB
8#include <linux/dma-mapping.h>
9
46a7fa27 10#include <asm/iommu.h>
17a941d8
MBY
11#include <asm/swiotlb.h>
12#include <asm/dma.h>
13
/* Non-zero when the SWIOTLB bounce-buffer path is in use; set in
 * pci_swiotlb_init() and read on every DMA mapping, hence __read_mostly. */
int swiotlb __read_mostly;
17a941d8 15
79ff56eb 16void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
8ce79960
JF
17{
18 return alloc_bootmem_low_pages(size);
19}
20
21void *swiotlb_alloc(unsigned order, unsigned long nslabs)
22{
23 return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
24}
25
70a7d3cc 26dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
1d32251e
IC
27{
28 return paddr;
29}
30
31phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
32{
33 return baddr;
34}
35
/*
 * Weak default: no physical range ever needs forced bounce-buffering
 * on x86.  Architectures/platforms with non-identity bus mappings
 * override this symbol.
 */
int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
{
	return 0;
}
40
03967c52
FT
41static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
42 dma_addr_t *dma_handle, gfp_t flags)
43{
44 void *vaddr;
45
46 vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
47 if (vaddr)
48 return vaddr;
49
50 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
51}
52
/* DMA operations table installed when swiotlb bounce buffering is active. */
static struct dma_map_ops swiotlb_dma_ops = {
	.mapping_error = swiotlb_dma_mapping_error,
	/* Coherent allocation goes through the x86 wrapper so the generic
	 * allocator is tried before the swiotlb pool. */
	.alloc_coherent = x86_swiotlb_alloc_coherent,
	.free_coherent = swiotlb_free_coherent,
	/* Sync callbacks copy data between the bounce buffer and the
	 * original buffer around CPU/device ownership transfers. */
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	/* NULL: fall back to the default dma_supported behavior. */
	.dma_supported = NULL,
};
69
563aaf06 70void __init pci_swiotlb_init(void)
17a941d8
MBY
71{
72 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
cfb80c9e 73#ifdef CONFIG_X86_64
c987d12f 74 if (!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)
17a941d8 75 swiotlb = 1;
cfb80c9e 76#endif
65f87d8a
AK
77 if (swiotlb_force)
78 swiotlb = 1;
17a941d8 79 if (swiotlb) {
17a941d8 80 printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
5b7b644c 81 swiotlb_init();
17a941d8
MBY
82 dma_ops = &swiotlb_dma_ops;
83 }
84}
This page took 0.329377 seconds and 5 git commands to generate.