arch/x86/mm/ioremap.c

/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

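/*
 * Editor's illustrative sketch (not part of the original file): the "cache
 * attribute conflict" mentioned above arises when the same physical range is
 * visible both through an ioremap mapping and through the kernel's direct
 * (identity) map with different memory types. Conceptually, the fix-up is:
 *
 *	unsigned long vaddr = (unsigned long)__va(phys_addr);
 *	ioremap_change_attr(vaddr, size, _PAGE_CACHE_WC);
 *
 * In this file the adjustment happens indirectly, via the
 * kernel_map_sync_memtype() call in __ioremap_caller() below; the two lines
 * above are only a hypothetical direct usage.
 */
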
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	int ram_region;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	/* First check if whole region can be identified as RAM or not */
	ram_region = region_is_ram(phys_addr, size);
	if (ram_region > 0) {
		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
			  (unsigned long int)phys_addr,
			  (unsigned long int)last_addr);
		return NULL;
	}

	/* If could not be identified(-1), check page by page */
	if (ram_region < 0) {
		pfn = phys_addr >> PAGE_SHIFT;
		last_pfn = last_addr >> PAGE_SHIFT;
		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1)
			return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			goto err_free_memtype;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

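/*
 * Editor's worked example (hypothetical numbers, not part of the original
 * file) of the alignment handling in __ioremap_caller() above: a request for
 * phys_addr = 0xfebc1004 and size = 0x10 gives last_addr = 0xfebc1013, so
 * offset = 0x4, the page-aligned phys_addr becomes 0xfebc1000, and size is
 * rounded up to one page (0x1000). The vm area is mapped at some vaddr and
 * the caller gets back vaddr + 0x4, so the unaligned request stays
 * transparent to it.
 */
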
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

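/*
 * Editor's illustrative usage sketch (hypothetical device and register
 * offsets, not part of the original file): a typical PCI driver maps an MMIO
 * BAR uncached, accesses it with the mmio helpers, and releases the mapping
 * when done:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + MY_CTRL_REG);	(* MY_CTRL_REG is made up *)
 *	status = readl(regs + MY_STATUS_REG);	(* MY_STATUS_REG is made up *)
 *	iounmap(regs);
 */
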
/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

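/*
 * Editor's illustrative sketch (hypothetical framebuffer names, not part of
 * the original file): write-combining suits large, write-mostly regions such
 * as a graphics framebuffer, where batched writes matter more than strict
 * ordering:
 *
 *	fb = ioremap_wc(fb_phys_base, fb_size);
 *
 * When PAT is disabled, the fallback above degrades to the uncached-minus
 * mapping provided by ioremap_nocache().
 */
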
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

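/*
 * Editor's note (an assumption about typical use, not from the original
 * file): ioremap_cache() maps the range write-back cacheable, which is only
 * appropriate for memory-like targets such as firmware or ACPI tables, not
 * for device registers, e.g.:
 *
 *	tbl = (void __force *)ioremap_cache(table_phys, table_len);
 *
 * xlate_dev_mem_ptr() below uses it for exactly this kind of access.
 */
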
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

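/*
 * Editor's illustrative sketch (assumption, not part of the original file):
 * ioremap_prot() lets the caller supply the cache bits itself, for example
 * by reusing the attributes of an existing mapping:
 *
 *	prot = pgprot_val(vma->vm_page_prot);
 *	addr = ioremap_prot(phys, PAGE_SIZE, prot);
 *
 * Only the _PAGE_CACHE_MASK bits of the supplied value are honoured here.
 */
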
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}

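/*
 * Editor's illustrative sketch (a simplification of how the /dev/mem read
 * path in drivers/char/mem.c uses these helpers; an assumption, not part of
 * this file): callers translate a physical address, copy from the resulting
 * kernel pointer, then undo the translation so any temporary
 * ioremap_cache() mapping is released:
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */
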
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
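
/*
 * Editor's illustrative sketch (assumption, not part of the original file):
 * the fixmap PTEs set up above back the generic early_ioremap() helpers,
 * which boot code uses before the normal ioremap()/vmalloc machinery is
 * available, for example to peek at a firmware table:
 *
 *	p = early_ioremap(table_phys, len);
 *	...read the table...
 *	early_iounmap(p, len);
 */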