arch/arm64/mm/mmu.c
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

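/*
 * Choose the memory attributes for phys_mem_access_prot() users (e.g.
 * /dev/mem mappings): pfns without a valid memmap get non-cached (Device)
 * attributes, O_SYNC mappings get write-combine, and everything else keeps
 * the protection passed in by the caller.
 */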
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

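/*
 * Boot-time page table allocator: grab a zeroed, naturally aligned block
 * straight from memblock, before the core page allocator is available.
 */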
static void __init *early_alloc(unsigned long sz)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(sz, sz);
	BUG_ON(!phys);
	ptr = __va(phys);
	memset(ptr, 0, sz);
	return ptr;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available;
		 * permissions will be fixed up later.
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

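/*
 * Create (or rewrite) the PTE entries covering [addr, end) under *pmd,
 * mapping them to the given pfn range with 'prot'. A missing or
 * section-mapped PMD is first (re)populated with a table page obtained
 * from 'alloc'.
 */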
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot,
				  void *(*alloc)(unsigned long size))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
		flush_tlb_all();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

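/*
 * Break a PUD block mapping into PMD-sized blocks, preserving the original
 * attributes. The pgprot bits are recovered by XOR-ing the output address
 * back out of the old PUD value.
 */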
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

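/*
 * Fill in the PMD level for [addr, end): use section (block) mappings where
 * the virtual and physical addresses allow it, otherwise fall back to page
 * mappings via alloc_init_pte(). Stale boot-time entries are flushed and,
 * where possible, their table pages handed back to memblock.
 */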
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  void *(*alloc)(unsigned long size))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}

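/*
 * A 1GB block mapping is only possible with the 4K granule (where a PUD
 * entry covers 1GB), and only if the virtual and physical addresses are
 * both 1GB aligned for the whole range.
 */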
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

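/*
 * Fill in the PUD level for [addr, end): allocate a PUD table if the PGD
 * entry is empty, use 1GB block mappings where use_1G_block() allows, and
 * otherwise descend into alloc_init_pmd().
 */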
static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  void *(*alloc)(unsigned long size))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
		pgd_populate(mm, pgd, pud);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping from 'phys' to 'virt' covering 'size' bytes.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
				    phys_addr_t phys, unsigned long virt,
				    phys_addr_t size, pgprot_t prot,
				    void *(*alloc)(unsigned long size))
{
	unsigned long addr, length, end, next;

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

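/*
 * Page table allocator for the late (post-boot) mapping calls: a single
 * page from the page allocator, at most PAGE_SIZE per request.
 */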
static void *late_alloc(unsigned long size)
{
	void *ptr;

	BUG_ON(size > PAGE_SIZE);
	ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);
	return ptr;
}

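/*
 * Boot-time wrapper around __create_mapping() for the kernel page tables,
 * allocating any intermediate tables from memblock via early_alloc().
 */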
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
			 size, prot, early_alloc);
}

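/*
 * Create a mapping in a caller-supplied mm that maintains its own set of
 * kernel page tables (the EFI runtime mappings, for instance), allocating
 * intermediate tables with late_alloc().
 */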
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
				late_alloc);
}

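/*
 * Like create_mapping(), but usable after boot: remap an existing kernel
 * range (e.g. to change its permissions), allocating with late_alloc().
 */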
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
				phys, virt, size, prot, late_alloc);
}

#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	/*
	 * Set up the executable regions using the existing section mappings
	 * for now. This will get more fine grained later once all memory
	 * is mapped
	 */
	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);

	if (end < kernel_x_start) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else if (start >= kernel_x_end) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else {
		if (start < kernel_x_start)
			create_mapping(start, __phys_to_virt(start),
				kernel_x_start - start,
				PAGE_KERNEL);
		create_mapping(kernel_x_start,
				__phys_to_virt(kernel_x_start),
				kernel_x_end - kernel_x_start,
				PAGE_KERNEL_EXEC);
		if (kernel_x_end < end)
			create_mapping(kernel_x_end,
				__phys_to_virt(kernel_x_end),
				end - kernel_x_end,
				PAGE_KERNEL);
	}
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	create_mapping(start, __phys_to_virt(start), end - start,
			PAGE_KERNEL_EXEC);
}
#endif

static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps)
	 * of memory starting from PHYS_OFFSET (which must be aligned to 2MB
	 * as per Documentation/arm64/booting.txt).
	 */
	limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

		if (ARM64_SWAPPER_USES_SECTION_MAPS) {
			/*
			 * For the first memory bank align the start address and
			 * current memblock limit to prevent create_mapping() from
			 * allocating pte page tables from unmapped memory. With
			 * the section maps, if the first block doesn't end on section
			 * size boundary, create_mapping() will try to allocate a pte
			 * page, which may be returned from an unmapped area.
			 * When section maps are not used, the pte page table for the
			 * current limit is already present in swapper_pg_dir.
			 */
			if (start < limit)
				start = ALIGN(start, SECTION_SIZE);
			if (end < limit) {
				limit = end & SECTION_MASK;
				memblock_set_current_limit(limit);
			}
		}
		__map_memblock(start, end);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

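/*
 * With CONFIG_DEBUG_RODATA, __map_memblock() rounded the executable region
 * out to SWAPPER_BLOCK_SIZE; now that everything is mapped, remap the
 * leading and trailing slack as non-executable. No-op otherwise.
 */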
static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
	/* now that we are actually fully mapped, make the start/end more fine grained */
	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_start = round_down(__pa(_stext),
							 SWAPPER_BLOCK_SIZE);

		create_mapping(aligned_start, __phys_to_virt(aligned_start),
				__pa(_stext) - aligned_start,
				PAGE_KERNEL);
	}

	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_end = round_up(__pa(__init_end),
							 SWAPPER_BLOCK_SIZE);
		create_mapping(__pa(__init_end), (unsigned long)__init_end,
				aligned_end - __pa(__init_end),
				PAGE_KERNEL);
	}
#endif
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
				(unsigned long)_etext - (unsigned long)_stext,
				PAGE_KERNEL_ROX);
}
#endif

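/*
 * Make the __init area non-executable once it has been freed, so stale
 * init text can no longer be executed.
 */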
void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			(unsigned long)__init_end - (unsigned long)__init_begin,
			PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	map_mem();
	fixup_executable();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

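/*
 * The fixmap page tables (bm_pud/bm_pmd/bm_pte above) are statically
 * allocated; these helpers walk down to the relevant table level for a
 * fixmap address, BUG-ing if an intermediate level is missing or malformed.
 */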
static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}

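/*
 * Wire the statically allocated bm_* tables into init_mm for the fixmap
 * region, and sanity-check that the boot-time ioremap slots all fall within
 * the single PMD prepared here.
 */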
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

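/*
 * Install or clear a single fixmap entry. A zero pgprot clears the PTE and
 * flushes the TLB for that page; anything else installs the mapping.
 */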
void __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}

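/*
 * Map the FDT blob through the fixmap so it can be parsed before the linear
 * mapping exists. Returns the virtual address of the FDT, or NULL if the
 * physical address or the blob itself fails validation.
 */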
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	pgprot_t prot = PAGE_KERNEL_RO;
	int size, offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the size field of the
	 * FDT header after mapping the first chunk, double check here if that
	 * is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
		       SWAPPER_BLOCK_SIZE, prot);

	if (fdt_check_header(dt_virt) != 0)
		return NULL;

	size = fdt_totalsize(dt_virt);
	if (size > MAX_FDT_SIZE)
		return NULL;

	if (offset + size > SWAPPER_BLOCK_SIZE)
		create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
			       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

	memblock_reserve(dt_phys, size);

	return dt_virt;
}