Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * | |
3 | * Trampoline.S Derived from Setup.S by Linus Torvalds | |
4 | * | |
5 | * 4 Jan 1997 Michael Chastain: changed to gnu as. | |
90b1c208 | 6 | * 15 Sept 2005 Eric Biederman: 64bit PIC support |
1da177e4 LT |
7 | * |
8 | * Entry: CS:IP point to the start of our code, we are | |
9 | * in real mode with no stack, but the rest of the | |
10 | * trampoline page to make our stack and everything else | |
11 | * is a mystery. | |
12 | * | |
1da177e4 LT |
13 | * On entry to trampoline_data, the processor is in real mode |
14 | * with 16-bit addressing and 16-bit data. CS has some value | |
15 | * and IP is zero. Thus, data addresses need to be absolute | |
16 | * (no relocation) and are taken with regard to r_base. | |
17 | * | |
90b1c208 VG |
18 | * With the addition of trampoline_level4_pgt this code can |
19 | * now enter a 64bit kernel that lives at arbitrary 64bit | |
20 | * physical addresses. | |
21 | * | |
1da177e4 LT |
22 | * If you work on this file, check the object module with objdump |
23 | * --full-contents --reloc to make sure there are no relocation | |
90b1c208 | 24 | * entries. |
1da177e4 LT |
25 | */ |
26 | ||
27 | #include <linux/linkage.h> | |
90b1c208 | 28 | #include <asm/pgtable.h> |
1da177e4 | 29 | #include <asm/page.h> |
90b1c208 VG |
30 | #include <asm/msr.h> |
31 | #include <asm/segment.h> | |
1da177e4 | 32 | |
121d7bf5 | 33 | .section .rodata, "a", @progbits
1da177e4 LT |
34 | |
# 16-bit code: the AP begins executing here in real mode.
35 | .code16 |
36 | ||
37 | ENTRY(trampoline_data) |
# r_base anchors all absolute data references: CS points at this page
# and IP starts at 0, so "sym - r_base" is the correct real-mode offset
# (see the header comment -- no relocations may be emitted).
38 | r_base = . |
90b1c208 | 39 | cli # We should be safe anyway
1da177e4 LT |
40 | wbinvd |
41 | mov %cs, %ax # Code and data in the same place |
42 | mov %ax, %ds |
90b1c208 VG |
43 | mov %ax, %es
44 | mov %ax, %ss |
1da177e4 | 45 | |
1da177e4 LT |
46 | |
47 | movl $0xA5A5A5A5, trampoline_data - r_base |
48 | # write marker so the master knows we're running |
49 | ||
90b1c208 VG |
50 | # Setup stack
51 | movw $(trampoline_stack_end - r_base), %sp |
52 | ||
53 | call verify_cpu # Verify the cpu supports long mode |
a4831e08 VG |
54 | testl %eax, %eax # Check for return code
55 | jnz no_longmode |
90b1c208 VG |
56 |
# %esi = physical base address of the trampoline (CS << 4).  It is used
# below to patch the far-jump vectors and the GDT base, which were
# assembled relative to r_base.
57 | mov %cs, %ax |
58 | movzx %ax, %esi # Find the 32bit trampoline location |
59 | shll $4, %esi |
60 | ||
61 | # Fixup the vectors |
62 | addl %esi, startup_32_vector - r_base |
63 | addl %esi, startup_64_vector - r_base |
64 | addl %esi, tgdt + 2 - r_base # Fixup the gdt pointer |
65 | ||
983d5dbd VG |
66 | /*
67 | * GDT tables in non default location kernel can be beyond 16MB and |
68 | * lgdt will not be able to load the address as in real mode default |
69 | * operand size is 16bit. Use lgdtl instead to force operand size |
70 | * to 32 bit. |
71 | */ |
72 | ||
90b1c208 VG |
73 | lidtl tidt - r_base # load idt with 0, 0
74 | lgdtl tgdt - r_base # load gdt with whatever is appropriate |
1da177e4 LT |
75 | |
# Set CR0.PE via lmsw (ax = 1); lmsw only touches the low 4 CR0 bits.
76 | xor %ax, %ax |
77 | inc %ax # protected mode (PE) bit |
78 | lmsw %ax # into protected mode |
90b1c208 VG |
79 |
80 | # flush prefetch and jump to startup_32 |
81 | ljmpl *(startup_32_vector - r_base) |
82 | ||
83 | .code32 |
84 | .balign 4 |
# 32-bit protected mode: enable PAE, load the trampoline page tables,
# set EFER.LME, then turn on paging to activate long mode.
85 | startup_32: |
86 | movl $__KERNEL_DS, %eax # Initialize the %ds segment register |
87 | movl %eax, %ds |
88 | ||
89 | xorl %eax, %eax |
90 | btsl $5, %eax # Enable PAE mode |
91 | movl %eax, %cr4 |
92 | ||
93 | # Setup trampoline 4 level pagetables |
# %esi still holds the trampoline's physical base from the real-mode
# code, so this yields the physical address of trampoline_level4_pgt.
94 | leal (trampoline_level4_pgt - r_base)(%esi), %eax |
95 | movl %eax, %cr3 |
96 | ||
97 | movl $MSR_EFER, %ecx |
98 | movl $(1 << _EFER_LME), %eax # Enable Long Mode |
99 | xorl %edx, %edx |
100 | wrmsr |
101 | ||
102 | xorl %eax, %eax |
103 | btsl $31, %eax # Enable paging and in turn activate Long Mode |
104 | btsl $0, %eax # Enable protected mode |
105 | movl %eax, %cr0 |
106 | ||
107 | /* |
108 | * At this point we're in long mode but in 32bit compatibility mode |
109 | * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn |
110 | * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use |
111 | * the new gdt/idt that has __KERNEL_CS with CS.L = 1. |
112 | */ |
113 | ljmp *(startup_64_vector - r_base)(%esi) |
114 | ||
115 | .code64 |
116 | .balign 4 |
117 | startup_64: |
118 | # Now jump into the kernel using virtual addresses |
# Indirect jump via register: secondary_startup_64 is a high kernel
# virtual address that a relative jump could not reach from here.
119 | movq $secondary_startup_64, %rax |
120 | jmp *%rax |
121 | ||
122 | .code16 |
90b1c208 VG |
# CPU lacks long mode support: halt here forever (hlt resumes on an
# interrupt, hence the loop back).
123 | no_longmode:
124 | hlt |
125 | jmp no_longmode |
e0a84f68 | 126 | #include "verify_cpu_64.S" |
1da177e4 LT |
127 | |
128 | # Careful: these need to be in the same 64K segment as the code above; |
# Null IDT pseudo-descriptor (limit 0, base 0), loaded by lidtl above.
90b1c208 | 129 | tidt: |
1da177e4 LT |
130 | .word 0 # idt limit = 0 |
131 | .word 0, 0 # idt base = 0L |
132 | ||
90b1c208 VG |
133 | # Duplicate the global descriptor table
134 | # so the kernel can live anywhere |
135 | .balign 4 |
136 | tgdt: |
137 | .short tgdt_end - tgdt # gdt limit |
# Base is assembled relative to r_base; the real-mode code adds the
# trampoline's physical address (%esi) to this field before lgdtl.
138 | .long tgdt - r_base |
139 | .short 0 |
140 | .quad 0x00cf9b000000ffff # __KERNEL32_CS |
141 | .quad 0x00af9b000000ffff # __KERNEL_CS |
142 | .quad 0x00cf93000000ffff # __KERNEL_DS |
143 | tgdt_end: |
144 | ||
# Far-pointer operands for the indirect ljmpl/ljmp: 32-bit offset
# (patched with %esi at runtime) followed by a 16-bit selector.
145 | .balign 4 |
146 | startup_32_vector: |
147 | .long startup_32 - r_base |
148 | .word __KERNEL32_CS, 0 |
149 | ||
150 | .balign 4 |
151 | startup_64_vector: |
152 | .long startup_64 - r_base |
153 | .word __KERNEL_CS, 0 |
154 | ||
# Stack grows down from offset 0x1000 (end of the trampoline page);
# %sp is pointed at trampoline_stack_end by the real-mode code.
155 | trampoline_stack: |
156 | .org 0x1000 |
157 | trampoline_stack_end: |
# 512-entry (4 KiB) PML4: entry 0 -> level3_ident_pgt, entry 511
# -> level3_kernel_pgt; the middle 510 entries are zero.
158 | ENTRY(trampoline_level4_pgt) |
159 | .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE |
160 | .fill 510,8,0 |
161 | .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE |
1da177e4 | 162 | |
90b1c208 | 163 | ENTRY(trampoline_end)