/*
 * Provide common bits of early_ioremap() support for architectures needing
 * temporary mappings during boot before ioremap() is available.
 *
 * This is mostly a direct copy of the x86 early_ioremap implementation.
 *
 * (C) Copyright 1995 1996, 2014 Linus Torvalds
 */
10 #include <linux/kernel.h>
11 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/slab.h>
16 #include <linux/vmalloc.h>
17 #include <asm/fixmap.h>
20 static int early_ioremap_debug __initdata
;
22 static int __init
early_ioremap_debug_setup(char *str
)
24 early_ioremap_debug
= 1;
28 early_param("early_ioremap_debug", early_ioremap_debug_setup
);
30 static int after_paging_init __initdata
;
32 void __init __weak
early_ioremap_shutdown(void)
36 void __init
early_ioremap_reset(void)
38 early_ioremap_shutdown();
39 after_paging_init
= 1;
/*
 * Generally, ioremap() is available after paging_init() has been called.
 * Architectures wanting to allow early_ioremap after paging_init() can
 * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
 */
47 #ifndef __late_set_fixmap
48 static inline void __init
__late_set_fixmap(enum fixed_addresses idx
,
49 phys_addr_t phys
, pgprot_t prot
)
55 #ifndef __late_clear_fixmap
56 static inline void __init
__late_clear_fixmap(enum fixed_addresses idx
)
62 static void __iomem
*prev_map
[FIX_BTMAPS_SLOTS
] __initdata
;
63 static unsigned long prev_size
[FIX_BTMAPS_SLOTS
] __initdata
;
64 static unsigned long slot_virt
[FIX_BTMAPS_SLOTS
] __initdata
;
66 void __init
early_ioremap_setup(void)
70 for (i
= 0; i
< FIX_BTMAPS_SLOTS
; i
++)
71 if (WARN_ON(prev_map
[i
]))
74 for (i
= 0; i
< FIX_BTMAPS_SLOTS
; i
++)
75 slot_virt
[i
] = __fix_to_virt(FIX_BTMAP_BEGIN
- NR_FIX_BTMAPS
*i
);
78 static int __init
check_early_ioremap_leak(void)
83 for (i
= 0; i
< FIX_BTMAPS_SLOTS
; i
++)
87 if (WARN(count
, KERN_WARNING
88 "Debug warning: early ioremap leak of %d areas detected.\n"
89 "please boot with early_ioremap_debug and report the dmesg.\n",
94 late_initcall(check_early_ioremap_leak
);
96 static void __init __iomem
*
97 __early_ioremap(resource_size_t phys_addr
, unsigned long size
, pgprot_t prot
)
100 resource_size_t last_addr
;
101 unsigned int nrpages
;
102 enum fixed_addresses idx
;
105 WARN_ON(system_state
!= SYSTEM_BOOTING
);
108 for (i
= 0; i
< FIX_BTMAPS_SLOTS
; i
++) {
115 if (WARN(slot
< 0, "%s(%08llx, %08lx) not found slot\n",
116 __func__
, (u64
)phys_addr
, size
))
119 /* Don't allow wraparound or zero size */
120 last_addr
= phys_addr
+ size
- 1;
121 if (WARN_ON(!size
|| last_addr
< phys_addr
))
124 prev_size
[slot
] = size
;
126 * Mappings have to be page-aligned
128 offset
= phys_addr
& ~PAGE_MASK
;
129 phys_addr
&= PAGE_MASK
;
130 size
= PAGE_ALIGN(last_addr
+ 1) - phys_addr
;
133 * Mappings have to fit in the FIX_BTMAP area.
135 nrpages
= size
>> PAGE_SHIFT
;
136 if (WARN_ON(nrpages
> NR_FIX_BTMAPS
))
142 idx
= FIX_BTMAP_BEGIN
- NR_FIX_BTMAPS
*slot
;
143 while (nrpages
> 0) {
144 if (after_paging_init
)
145 __late_set_fixmap(idx
, phys_addr
, prot
);
147 __early_set_fixmap(idx
, phys_addr
, prot
);
148 phys_addr
+= PAGE_SIZE
;
152 WARN(early_ioremap_debug
, "%s(%08llx, %08lx) [%d] => %08lx + %08lx\n",
153 __func__
, (u64
)phys_addr
, size
, slot
, offset
, slot_virt
[slot
]);
155 prev_map
[slot
] = (void __iomem
*)(offset
+ slot_virt
[slot
]);
156 return prev_map
[slot
];
159 void __init
early_iounmap(void __iomem
*addr
, unsigned long size
)
161 unsigned long virt_addr
;
162 unsigned long offset
;
163 unsigned int nrpages
;
164 enum fixed_addresses idx
;
168 for (i
= 0; i
< FIX_BTMAPS_SLOTS
; i
++) {
169 if (prev_map
[i
] == addr
) {
175 if (WARN(slot
< 0, "early_iounmap(%p, %08lx) not found slot\n",
179 if (WARN(prev_size
[slot
] != size
,
180 "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
181 addr
, size
, slot
, prev_size
[slot
]))
184 WARN(early_ioremap_debug
, "early_iounmap(%p, %08lx) [%d]\n",
187 virt_addr
= (unsigned long)addr
;
188 if (WARN_ON(virt_addr
< fix_to_virt(FIX_BTMAP_BEGIN
)))
191 offset
= virt_addr
& ~PAGE_MASK
;
192 nrpages
= PAGE_ALIGN(offset
+ size
) >> PAGE_SHIFT
;
194 idx
= FIX_BTMAP_BEGIN
- NR_FIX_BTMAPS
*slot
;
195 while (nrpages
> 0) {
196 if (after_paging_init
)
197 __late_clear_fixmap(idx
);
199 __early_set_fixmap(idx
, 0, FIXMAP_PAGE_CLEAR
);
203 prev_map
[slot
] = NULL
;
206 /* Remap an IO device */
207 void __init __iomem
*
208 early_ioremap(resource_size_t phys_addr
, unsigned long size
)
210 return __early_ioremap(phys_addr
, size
, FIXMAP_PAGE_IO
);
215 early_memremap(resource_size_t phys_addr
, unsigned long size
)
217 return (__force
void *)__early_ioremap(phys_addr
, size
,
220 #else /* CONFIG_MMU */
222 void __init __iomem
*
223 early_ioremap(resource_size_t phys_addr
, unsigned long size
)
225 return (__force
void __iomem
*)phys_addr
;
230 early_memremap(resource_size_t phys_addr
, unsigned long size
)
232 return (void *)phys_addr
;
235 void __init
early_iounmap(void __iomem
*addr
, unsigned long size
)
239 #endif /* CONFIG_MMU */
242 void __init
early_memunmap(void *addr
, unsigned long size
)
244 early_iounmap((__force
void __iomem
*)addr
, size
);