arch/x86/realmode/init.c
#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/realmode.h>

struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;

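/*
 * Reserve a block of low memory for the real-mode trampoline, copy the
 * real-mode blob into it, apply its relocations and fill in the
 * trampoline header that the 16-bit AP startup code reads.
 */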
void __init setup_real_mode(void)
{
        phys_addr_t mem;
        u16 real_mode_seg;
        u32 *rel;
        u32 count;
        u32 *ptr;
        u16 *seg;
        int i;
        unsigned char *base;
        struct trampoline_header *trampoline_header;
        size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
        u64 *trampoline_pgd;
        u64 efer;
#endif

        /* Has to be in very low memory so we can execute real-mode AP code. */
        mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
        if (!mem)
                panic("Cannot allocate trampoline\n");

        base = __va(mem);
        memblock_reserve(mem, size);
        real_mode_header = (struct real_mode_header *) base;
        printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
               base, (unsigned long long)mem, size);

        memcpy(base, real_mode_blob, size);

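        /* Real-mode segments address memory in 16-byte paragraphs, hence the shift. */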
        real_mode_seg = __pa(base) >> 4;
        rel = (u32 *) real_mode_relocs;

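        /*
         * real_mode_relocs holds two tables, each a u32 count followed by
         * that many u32 offsets into the blob: first the 16-bit segment
         * relocations, then the 32-bit linear relocations.
         */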
        /* 16-bit segment relocations. */
        count = rel[0];
        rel = &rel[1];
        for (i = 0; i < count; i++) {
                seg = (u16 *) (base + rel[i]);
                *seg = real_mode_seg;
        }

        /* 32-bit linear relocations. */
        count = rel[i];
        rel = &rel[i + 1];
        for (i = 0; i < count; i++) {
                ptr = (u32 *) (base + rel[i]);
                *ptr += __pa(base);
        }

        /* Must be performed *after* relocation. */
        trampoline_header = (struct trampoline_header *)
                __va(real_mode_header->trampoline_header);

#ifdef CONFIG_X86_32
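        /*
         * 32-bit APs enter at startup_32_smp using the boot GDT; the limit
         * is size - 1, i.e. it covers descriptors up to and including
         * __BOOT_DS.
         */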
        trampoline_header->start = __pa(startup_32_smp);
        trampoline_header->gdt_limit = __BOOT_DS + 7;
        trampoline_header->gdt_base = __pa(boot_gdt);
#else
        /*
         * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
         * so we need to mask it out.
         */
        rdmsrl(MSR_EFER, efer);
        trampoline_header->efer = efer & ~EFER_LMA;

        trampoline_header->start = (u64) secondary_startup_64;
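        /*
         * Export a pointer to the CR4 image in the header so that CR4
         * feature bits enabled later on the boot CPU can be mirrored
         * into the AP startup code.
         */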
        trampoline_cr4_features = &trampoline_header->cr4;
        *trampoline_cr4_features = read_cr4();

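        /*
         * The trampoline PGD needs two mappings: entry 0 identity-maps low
         * memory so paging can be enabled while still running at the
         * trampoline's physical address, and entry 511 maps the kernel so
         * the jump to secondary_startup_64 lands in mapped text.
         */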
        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
        trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
        trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE;
#endif
}

/*
 * setup_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Thus, we use an arch_initcall instead.
 */
static int __init set_real_mode_permissions(void)
{
        unsigned char *base = (unsigned char *) real_mode_header;
        size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

        size_t ro_size =
                PAGE_ALIGN(real_mode_header->ro_end) -
                __pa(base);

        size_t text_size =
                PAGE_ALIGN(real_mode_header->ro_end) -
                real_mode_header->text_start;

        unsigned long text_start =
                (unsigned long) __va(real_mode_header->text_start);

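        /*
         * Default the whole blob to non-executable, make everything up to
         * ro_end read-only, then re-enable execute permission on the .text
         * region only.
         */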
        set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
        set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
        set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);

        return 0;
}

arch_initcall(set_real_mode_permissions);