/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	int ram_region;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	/* First check if the whole region can be identified as RAM or not */
	ram_region = region_is_ram(phys_addr, size);
	if (ram_region > 0) {
		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
			  (unsigned long int)phys_addr,
			  (unsigned long int)last_addr);
		return NULL;
	}

	/* If it could not be identified (-1), check page by page */
	if (ram_region < 0) {
		pfn = phys_addr >> PAGE_SHIFT;
		last_pfn = last_addr >> PAGE_SHIFT;
		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1)
			return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			goto err_free_memtype;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}
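/*
 * Worked example (editor's illustration, hypothetical numbers): a request
 * for phys_addr = 0x10001234 with size = 0x10 gives last_addr = 0x10001243,
 * so offset = 0x234, the aligned phys_addr becomes 0x10001000, and the
 * aligned size becomes PAGE_ALIGN(0x10001244) - 0x10001000 = 0x1000.  A
 * whole page is mapped, and the caller gets back area->addr + 0x234, as
 * promised by the NOTE above.
 */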
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
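/*
 * Illustrative sketch (editor's addition, not part of this file): a typical
 * driver maps an MMIO BAR with ioremap_nocache(), touches it only through
 * the readl()/writel() helpers, and releases it with iounmap().  The BAR
 * address, size and register offsets below are hypothetical.
 */
#if 0
static int example_map_regs(void)
{
	void __iomem *regs;

	regs = ioremap_nocache(0xfebf0000, 0x1000);	/* hypothetical BAR */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x04);	/* hypothetical control register */
	(void)readl(regs + 0x08);	/* hypothetical status register */

	iounmap(regs);
	return 0;
}
#endif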
/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
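/*
 * Illustrative sketch (editor's addition): write combining suits large,
 * sequential streams of writes such as a framebuffer fill, since the CPU
 * may batch them into bursts; reads from WC memory remain uncached and
 * slow.  The framebuffer address and size below are hypothetical.
 */
#if 0
static void example_fill_fb(void)
{
	void __iomem *fb = ioremap_wc(0xd0000000, 0x100000); /* hypothetical */
	unsigned long i;

	if (!fb)
		return;
	for (i = 0; i < 0x100000; i += 4)
		writel(0, fb + i);	/* writes may be combined into bursts */
	iounmap(fb);
}
#endif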
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
/**
 * iounmap - Free a IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
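/*
 * Illustrative sketch (editor's addition): the vm_area is looked up and
 * removed without a lock held across both steps, so repeated or concurrent
 * iounmap() calls on the same cookie are a caller bug; the second call
 * finds no vm_area and hits the "bad address" path above.  The address
 * below is hypothetical.
 */
#if 0
static void example_double_unmap(void)
{
	void __iomem *v = ioremap_nocache(0xfebf0000, 0x1000); /* hypothetical */

	iounmap(v);
	iounmap(v);	/* WRONG: second unmap of the same pointer */
}
#endif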
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
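/*
 * Illustrative sketch (editor's addition): this is how a /dev/mem-style
 * read path pairs the two helpers, copying one byte of a possibly non-RAM
 * physical page through a temporary cached mapping.  The helper name is
 * hypothetical.
 */
#if 0
static int example_peek_phys(unsigned long phys, u8 *out)
{
	void *ptr = xlate_dev_mem_ptr(phys);

	if (!ptr)
		return -EFAULT;
	*out = *(u8 *)ptr;		/* read through the mapping */
	unxlate_dev_mem_ptr(phys, ptr);	/* unmaps only if it was ioremapped */
	return 0;
}
#endif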
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}
static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}
bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}
void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
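/*
 * Illustrative sketch (editor's addition): during early boot, before
 * vmalloc and the normal ioremap machinery are available, early_ioremap()
 * uses __early_set_fixmap() to wire one of the FIX_BTMAP_* slots above to
 * a physical page, e.g. to peek at a firmware table.  The address and
 * function name below are hypothetical.
 */
#if 0
static void __init example_early_peek(void)
{
	void __iomem *p = early_ioremap(0x000f0000, 64); /* hypothetical table */

	if (p) {
		u8 first = readb(p);	/* read through the fixmap slot */
		early_iounmap(p, 64);	/* clears the pte via __early_set_fixmap */
	}
}
#endif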