/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif
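
/*
 * Illustrative note (an assumption, not from the original source): under
 * CONFIG_PARAVIRT a CR2 read has to go through the paravirt ops, and the
 * GET_CR2_INTO_RAX helper leaves the value in %rax -- hence the extra
 * movq into the caller's destination register in the macro above.
 */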

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)

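/*
 * Illustrative note (an assumption, not from the original source): with the
 * usual 4-level paging constants (PGDIR_SHIFT == 39, PUD_SHIFT == 30,
 * PTRS_PER_PUD == 512) and __START_KERNEL_map == 0xffffffff80000000, these
 * work out to L4_START_KERNEL == 511 and L3_START_KERNEL == 510 -- the last
 * PGD slot and the second-to-last PUD slot, matching the "= 511" and
 * "= 510" entries commented in the page tables below.
 */
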
	.text
	__HEAD
	.code64
	.globl	startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode (CS.L = 1, CS.D = 0),
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/*
	 * Setup stack for verify_cpu(). "-8" because initial_stack is defined
	 * this way, see below. Our best guess is a NULL ptr for stack
	 * termination heuristics and we don't want to break anything which
	 * might depend on it (kgdb, ...).
	 */
	leaq	(__end_init_task - 8)(%rip), %rsp

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * Compute the delta between the address I am compiled to run at and
	 * the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
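	/*
	 * Illustrative note (an assumption, not from the original source):
	 * $_text - __START_KERNEL_map is the physical address the kernel
	 * was linked to be loaded at (typically 0x1000000), so %rbp now
	 * holds the relocation delta -- actual load address minus compiled
	 * load address -- and is zero when the kernel was not relocated.
	 */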

	/* Is the address not 2M aligned? */
	testl	$~PMD_PAGE_MASK, %ebp
	jnz	bad_address

	/*
	 * Is the address too large?
	 */
	leaq	_text(%rip), %rax
	shrq	$MAX_PHYSMEM_BITS, %rax
	jnz	bad_address

	/*
	 * Fixup the physical addresses in the page table
	 */
	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */
	leaq	_text(%rip), %rdi
	leaq	early_level4_pgt(%rip), %rbx

	movq	%rdi, %rax
	shrq	$PGDIR_SHIFT, %rax

	leaq	(4096 + _KERNPG_TABLE)(%rbx), %rdx
	movq	%rdx, 0(%rbx,%rax,8)
	movq	%rdx, 8(%rbx,%rax,8)

	addq	$4096, %rdx
	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)
	incl	%eax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)
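	/*
	 * Illustrative note (an assumption, not from the original source):
	 * the two pages right after early_level4_pgt (the start of
	 * early_dynamic_pgts, see below) serve as the PUD and PMD pages
	 * for this identity mapping. Two consecutive PGD and PUD slots
	 * are filled so the mapping still works if the kernel image
	 * straddles a 512G or 1G boundary at its load address.
	 */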

	addq	$8192, %rbx
	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rdi
	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
	leaq	(_end - 1)(%rip), %rcx
	shrq	$PMD_SHIFT, %rcx
	subq	%rdi, %rcx
	incl	%ecx

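	/*
	 * Illustrative note (an assumption, not from the original source):
	 * at this point %rax holds the 2MB-aligned physical address of the
	 * kernel plus the (non-global) large-page flags, %rdi the 2MB page
	 * number of _text (masked to a PMD index inside the loop), and
	 * %ecx the number of 2MB pages needed to cover _text.._end; the
	 * loop below fills one PMD entry per iteration.
	 */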
1:
	andq	$(PTRS_PER_PMD - 1), %rdi
	movq	%rax, (%rbx,%rdi,8)
	incq	%rdi
	addq	$PMD_SIZE, %rax
	decl	%ecx
	jnz	1b

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testb	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp	1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode (CS.L = 1, CS.D = 0),
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call	verify_cpu

	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
	movq	%rcx, %cr4

	/* Set up the early boot 4-level page tables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

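	/*
	 * Illustrative note (an assumption, not from the original source):
	 * the movq above loaded the link-time (virtual) address of the
	 * local label, so the indirect jump moves execution off the
	 * identity mapping onto kernel virtual addresses; a direct jump
	 * would be RIP-relative and stay at the current addresses.
	 */
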
	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */
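	/*
	 * Illustrative note (an assumption, not from the original source):
	 * bit 20 of the CPUID 0x80000001 %edx result (saved in %edi above)
	 * is the NX feature flag. EFER.NX is only set when that bit is
	 * present, since writing reserved EFER bits can raise #GP; the NX
	 * bit is also recorded in early_pmd_flags for the early mappings.
	 */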

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	initial_stack(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses the
	 * init data section until the per-cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr
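	/*
	 * Illustrative note (an assumption, not from the original source):
	 * wrmsr takes its 64-bit value split across %edx:%eax, so the two
	 * 32-bit loads above hand it the low and high halves of the
	 * initial_gs address (the per-cpu irq_stack_union, see below).
	 */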

	/* %rsi is a pointer to the real mode structure with interesting
	 * info; pass it to C */
	movq	%rsi, %rdi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(secondary_startup_64)

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here, then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movq	initial_stack(%rip),%rsp
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu0)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)
	GLOBAL(initial_stack)
	.quad	init_thread_union+THREAD_SIZE-8
	__FINITDATA

bad_address:
	jmp	bad_address

	__INIT
ENTRY(early_idt_handler_array)
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)
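/*
 * Illustrative note (an assumption, not from the original source): each
 * stub above occupies exactly EARLY_IDT_HANDLER_SIZE bytes; the .fill pads
 * the unused tail of every slot with 0xcc (int3) so a stray jump into the
 * padding traps. Vectors whose hardware frame lacks an error code (the
 * .ifeq case) get a dummy 0 pushed, keeping the stack layout uniform for
 * the common handler.
 */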

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl	early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq	%rsi				/* pt_regs->si */
	movq	8(%rsp), %rsi			/* RSI = vector number */
	movq	%rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	%rax				/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	pushq	%rbx				/* pt_regs->bx */
	pushq	%rbp				/* pt_regs->bp */
	pushq	%r12				/* pt_regs->r12 */
	pushq	%r13				/* pt_regs->r13 */
	pushq	%r14				/* pt_regs->r14 */
	pushq	%r15				/* pt_regs->r15 */

	cmpq	$14,%rsi		/* Page fault? */
	jnz	10f
	GET_CR2_INTO(%rdi)	/* Can clobber any volatile register if pv */
	call	early_make_pgtable
	andl	%eax,%eax
	jz	20f			/* All good */

10:
	movq	%rsp,%rdi	/* RDI = pt_regs; RSI is already trapnr */
	call	early_fixup_exception

20:
	decl	early_recursion_flag(%rip)
	jmp	restore_regs_and_iret
ENDPROC(early_idt_handler_common)

	__INITDATA

	.balign 4
GLOBAL(early_recursion_flag)
	.long 0

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
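
/*
 * Illustrative expansion (an assumption, not from the original source),
 * with PMD_SHIFT == 21, i.e. 2MB large pages:
 *	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, 2)
 * emits two .quad entries, 0 + (0 << 21) + PERM and 0 + (1 << 21) + PERM,
 * i.e. two consecutive 2MB mappings starting at physical address 0.
 */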

__INITDATA
NEXT_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_level4_pgt)
	.fill	512,8,0
#else
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)