/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
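
/*
 * (Illustrative note, not from the original file: read faults on untouched
 * anonymous mappings are typically satisfied by mapping this page read-only;
 * the first write then triggers copy-on-write of a private page.)
 */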

struct cachepolicy {
	const char	policy[16];
	u64		mair;
	u64		tcr;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.mair		= 0x44,		/* inner, outer non-cacheable */
		.tcr		= TCR_IRGN_NC | TCR_ORGN_NC,
	}, {
		.policy		= "writethrough",
		.mair		= 0xaa,		/* inner, outer write-through, read-allocate */
		.tcr		= TCR_IRGN_WT | TCR_ORGN_WT,
	}, {
		.policy		= "writeback",
		.mair		= 0xee,		/* inner, outer write-back, read-allocate */
		.tcr		= TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
	}
};

/*
 * These are useful for identifying cache coherency problems by allowing the
 * cache or the cache and writebuffer to be turned off. It changes the Normal
 * memory caching attributes in the MAIR_EL1 register.
 */
static int __init early_cachepolicy(char *p)
{
	int i;
	u64 tmp;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0)
			break;
	}
	if (i == ARRAY_SIZE(cache_policies)) {
		pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
		return 0;
	}

	flush_cache_all();

	/*
	 * Modify MT_NORMAL attributes in MAIR_EL1.
	 */
	asm volatile(
	"	mrs	%0, mair_el1\n"
	"	bfi	%0, %1, %2, #8\n"
	"	msr	mair_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));

	/*
	 * Modify TCR PTW cacheability attributes.
	 */
	asm volatile(
	"	mrs	%0, tcr_el1\n"
	"	bic	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	msr	tcr_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));

	flush_cache_all();

	return 0;
}
early_param("cachepolicy", early_cachepolicy);
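
/*
 * (Usage sketch, not from the original file: the policy is selected on the
 * kernel command line, e.g. "cachepolicy=writethrough". early_param()
 * registers early_cachepolicy() to parse that value string during early boot.)
 */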

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
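
/*
 * (Illustrative note, not from the original file: this hook is consulted on
 * mmap() of character devices such as /dev/mem, so pfns that are not valid
 * RAM are mapped non-cached, while O_SYNC mappings of RAM are downgraded to
 * write-combine.)
 */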

static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_bad(*pud)) {
		pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(mm, pud, pmd);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd))
				flush_tlb_all();
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}

static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
		pgd_populate(mm, pgd, pud);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if ((PAGE_SHIFT == 12) &&
		    ((addr | next | phys) & ~PUD_MASK) == 0) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
				memblock_free(table, PAGE_SIZE);
				flush_tlb_all();
			}
		} else {
			alloc_init_pmd(mm, pud, addr, next, phys, prot);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * requested physical/virtual mapping.
 */
static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
				    phys_addr_t phys, unsigned long virt,
				    phys_addr_t size, pgprot_t prot)
{
	unsigned long addr, length, end, next;

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(mm, pgd, addr, next, phys, prot);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
			 size, PAGE_KERNEL_EXEC);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot);
}
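
/*
 * (Illustrative sketch, not from the original file: a caller owning a
 * private set of page tables, e.g. a firmware-services mm, could install
 * a mapping for a region descriptor "md" along the lines of:
 *
 *	create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr,
 *			   md->num_pages << EFI_PAGE_SHIFT, PAGE_KERNEL);
 *
 * Both "efi_mm" and "md" here are assumptions for illustration only.)
 */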

static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
	 * PHYS_OFFSET (which must be aligned to 2MB as per
	 * Documentation/arm64/booting.txt).
	 */
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
		limit = PHYS_OFFSET + PMD_SIZE;
	else
		limit = PHYS_OFFSET + PUD_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

#ifndef CONFIG_ARM64_64K_PAGES
		/*
		 * For the first memory bank align the start address and
		 * current memblock limit to prevent create_mapping() from
		 * allocating pte page tables from unmapped memory.
		 * When 64K pages are enabled, the pte page table for the
		 * first PGDIR_SIZE is already present in swapper_pg_dir.
		 */
		if (start < limit)
			start = ALIGN(start, PMD_SIZE);
		if (end < limit) {
			limit = end & PMD_MASK;
			memblock_set_current_limit(limit);
		}
#endif

		create_mapping(start, __phys_to_virt(start), end - start);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	map_mem();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state.
	 */
	flush_cache_all();
	flush_tlb_all();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
}

/*
 * Enable the identity mapping to allow the MMU disabling.
 */
void setup_mm_for_reboot(void)
{
	cpu_switch_mm(idmap_pg_dir, &init_mm);
	flush_tlb_all();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
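
/*
 * (Illustrative note, not from the original file: /proc/kcore is one user;
 * fs/proc/kcore.c checks kern_addr_valid() before reading a kernel virtual
 * address on behalf of userspace, which is why block mappings must report
 * validity from the pud/pmd entry itself, as done above.)
 */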

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !CONFIG_ARM64_64K_PAGES */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* CONFIG_ARM64_64K_PAGES */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_ARM64_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t * fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t * fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t * fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}

void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}
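
/*
 * (Usage sketch, not from the original file: early_ioremap() establishes
 * temporary boot-time mappings through this hook, roughly:
 *
 *	__set_fixmap(FIX_BTMAP_BEGIN, phys & PAGE_MASK, FIXMAP_PAGE_IO);
 *
 * and tears them down again by passing a zero pgprot, which takes the
 * pte_clear() path above. FIXMAP_PAGE_IO is the generic fixmap I/O
 * protection from include/asm-generic/fixmap.h.)
 */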