From 8e0629c1d4ce86ce7d98ca8756f42769bb17a3c8 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Mon, 2 Jun 2014 14:58:25 +0100
Subject: [PATCH] swiotlb: don't assume PA 0 is invalid

In 2.6.29 io_tlb_orig_addr[] got converted from storing virtual
addresses to storing physical ones. While checking virtual addresses
against NULL is a legitimate thing to catch invalid entries, checking
physical ones against zero isn't: There's no guarantee that PFN 0 is
reserved on a particular platform.

Since it is unclear whether the check in swiotlb_tbl_unmap_single() is
actually needed, retain it but check against a guaranteed invalid
physical address. This requires setting up the array in a suitable
fashion. And since the original code failed to invalidate array entries
when regions get unmapped, this is being fixed at once along with
adding a similar check to swiotlb_tbl_sync_single().

Obviously the less intrusive change would be to simply drop the check
in swiotlb_tbl_unmap_single().

Signed-off-by: Jan Beulich
Signed-off-by: Konrad Rzeszutek Wilk
---
 lib/swiotlb.c | 28 ++++++++++++++++++----------
 1 file changed, 18 insertions(+), 10 deletions(-)

diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 649d097853a1..4abda074ea45 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -86,6 +86,7 @@ static unsigned int io_tlb_index;
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
  */
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
 static phys_addr_t *io_tlb_orig_addr;
 
 /*
@@ -188,12 +189,14 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	io_tlb_list = memblock_virt_alloc(
 				PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
 				PAGE_SIZE);
-	for (i = 0; i < io_tlb_nslabs; i++)
-		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-	io_tlb_index = 0;
 	io_tlb_orig_addr = memblock_virt_alloc(
 				PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
 				PAGE_SIZE);
+	for (i = 0; i < io_tlb_nslabs; i++) {
+		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+	}
+	io_tlb_index = 0;
 
 	if (verbose)
 		swiotlb_print_info();
@@ -313,10 +316,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	if (!io_tlb_list)
 		goto cleanup3;
 
-	for (i = 0; i < io_tlb_nslabs; i++)
-		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-	io_tlb_index = 0;
-
 	io_tlb_orig_addr = (phys_addr_t *)
 		__get_free_pages(GFP_KERNEL,
 				 get_order(io_tlb_nslabs *
@@ -324,7 +323,11 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	if (!io_tlb_orig_addr)
 		goto cleanup4;
 
-	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
+	for (i = 0; i < io_tlb_nslabs; i++) {
+		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+	}
+	io_tlb_index = 0;
 
 	swiotlb_print_info();
 
@@ -556,7 +559,8 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 	/*
 	 * First, sync the memory before unmapping the entry
 	 */
-	if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+	if (orig_addr != INVALID_PHYS_ADDR &&
+	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
 		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
 
 	/*
@@ -573,8 +577,10 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 		 * Step 1: return the slots to the free list, merging the
 		 * slots with superceeding slots
 		 */
-		for (i = index + nslots - 1; i >= index; i--)
+		for (i = index + nslots - 1; i >= index; i--) {
 			io_tlb_list[i] = ++count;
+			io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+		}
 		/*
 		 * Step 2: merge the returned slots with the preceding slots,
 		 * if available (non zero)
@@ -593,6 +599,8 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
+	if (orig_addr == INVALID_PHYS_ADDR)
+		return;
 	orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
 
 	switch (target) {
-- 
2.34.1
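
For illustration, a minimal standalone sketch of the sentinel idea follows
(plain C, not kernel code; the array size and names are made up for the
example, and phys_addr_t is assumed 64-bit). The point is that the all-ones
value ~(phys_addr_t)0 can never be the address of a mapped buffer, whereas
physical address 0 (PFN 0) may be ordinary RAM on some platforms, so a
plain zero check cannot tell "mapped at PA 0" apart from "unused":

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's phys_addr_t; assumed 64 bits here. */
typedef uint64_t phys_addr_t;

/*
 * All-ones is guaranteed invalid: no mapped buffer can start at the very
 * last byte of the physical address space, while PA 0 may be real memory.
 */
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

#define NSLABS 8	/* toy table size, for illustration only */

static phys_addr_t orig_addr[NSLABS];

int main(void)
{
	int i;

	/* Mark every slot free with the sentinel, as the patch now does. */
	for (i = 0; i < NSLABS; i++)
		orig_addr[i] = INVALID_PHYS_ADDR;

	/* Record a mapping whose original buffer sits at PA 0 -- legal. */
	orig_addr[3] = 0;

	/*
	 * A zero test would misreport slot 3 as free; the sentinel test
	 * correctly distinguishes "mapped at PA 0" from "unused".
	 */
	for (i = 0; i < NSLABS; i++)
		printf("slot %d: %s\n", i,
		       orig_addr[i] == INVALID_PHYS_ADDR ? "free" : "in use");

	return 0;
}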