/* arch/x86/boot/compressed/head_32.S */
/*
 *  linux/boot/head.S
 *
 *  Copyright (C) 1991, 1992, 1993  Linus Torvalds
 */

/*
 *  head.S contains the 32-bit startup code.
 *
 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
 * the page directory will exist. The startup code will be overwritten by
 * the page directory. [According to comments etc elsewhere on a compressed
 * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there.  This is also
 * useful for future device drivers that access the BIOS via VM86 mode.
 */

/*
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
	.text

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/boot.h>
#include <asm/asm-offsets.h>
33 __HEAD
34 ENTRY(startup_32)
35 #ifdef CONFIG_EFI_STUB
36 jmp preferred_addr
37
38 /*
39 * We don't need the return address, so set up the stack so
40 * efi_main() can find its arguments.
41 */
42 ENTRY(efi_pe_entry)
43 add $0x4, %esp
44
45 call make_boot_params
46 cmpl $0, %eax
47 je 1f
48 movl 0x4(%esp), %esi
49 movl (%esp), %ecx
50 pushl %eax
51 pushl %esi
52 pushl %ecx
53 sub $0x4, %esp
54
55 ENTRY(efi_stub_entry)
56 add $0x4, %esp
57 call efi_main
58 cmpl $0, %eax
59 movl %eax, %esi
60 jne 2f
61 1:
62 /* EFI init failed, so hang. */
63 hlt
64 jmp 1b
65 2:
66 call 3f
67 3:
68 popl %eax
69 subl $3b, %eax
70 subl BP_pref_address(%esi), %eax
71 add BP_code32_start(%esi), %eax
72 leal preferred_addr(%eax), %eax
73 jmp *%eax
74
75 preferred_addr:
76 #endif
77 cld
78 /*
79 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
80 * us to not reload segments
81 */
82 testb $(1<<6), BP_loadflags(%esi)
83 jnz 1f
84
85 cli
86 movl $__BOOT_DS, %eax
87 movl %eax, %ds
88 movl %eax, %es
89 movl %eax, %fs
90 movl %eax, %gs
91 movl %eax, %ss
92 1:
93
94 /*
95 * Calculate the delta between where we were compiled to run
96 * at and where we were actually loaded at. This can only be done
97 * with a short local call on x86. Nothing else will tell us what
98 * address we are running at. The reserved chunk of the real-mode
99 * data at 0x1e4 (defined as a scratch field) are used as the stack
100 * for this calculation. Only 4 bytes are needed.
101 */
102 leal (BP_scratch+4)(%esi), %esp
103 call 1f
104 1: popl %ebp
105 subl $1b, %ebp
106
107 /*
108 * %ebp contains the address we are loaded at by the boot loader and %ebx
109 * contains the address where we should move the kernel image temporarily
110 * for safe in-place decompression.
111 */
112
113 #ifdef CONFIG_RELOCATABLE
114 movl %ebp, %ebx
115 movl BP_kernel_alignment(%esi), %eax
116 decl %eax
117 addl %eax, %ebx
118 notl %eax
119 andl %eax, %ebx
120 #else
121 movl $LOAD_PHYSICAL_ADDR, %ebx
122 #endif
123
124 /* Target address to relocate to for decompression */
125 addl $z_extract_offset, %ebx
126
127 /* Set up the stack */
128 leal boot_stack_end(%ebx), %esp
129
130 /* Zero EFLAGS */
131 pushl $0
132 popfl
133
134 /*
135 * Copy the compressed kernel to the end of our buffer
136 * where decompression in place becomes safe.
137 */
138 pushl %esi
139 leal (_bss-4)(%ebp), %esi
140 leal (_bss-4)(%ebx), %edi
141 movl $(_bss - startup_32), %ecx
142 shrl $2, %ecx
143 std
144 rep movsl
145 cld
146 popl %esi
147
148 /*
149 * Jump to the relocated address.
150 */
151 leal relocated(%ebx), %eax
152 jmp *%eax
153 ENDPROC(startup_32)
154
155 .text
156 relocated:
157
158 /*
159 * Clear BSS (stack is currently empty)
160 */
161 xorl %eax, %eax
162 leal _bss(%ebx), %edi
163 leal _ebss(%ebx), %ecx
164 subl %edi, %ecx
165 shrl $2, %ecx
166 rep stosl
167
168 /*
169 * Adjust our own GOT
170 */
171 leal _got(%ebx), %edx
172 leal _egot(%ebx), %ecx
173 1:
174 cmpl %ecx, %edx
175 jae 2f
176 addl %ebx, (%edx)
177 addl $4, %edx
178 jmp 1b
179 2:
180
181 /*
182 * Do the decompression, and jump to the new kernel..
183 */
184 leal z_extract_offset_negative(%ebx), %ebp
185 /* push arguments for decompress_kernel: */
186 pushl %ebp /* output address */
187 pushl $z_input_len /* input_len */
188 leal input_data(%ebx), %eax
189 pushl %eax /* input_data */
190 leal boot_heap(%ebx), %eax
191 pushl %eax /* heap area */
192 pushl %esi /* real mode pointer */
193 call decompress_kernel
194 addl $20, %esp
195
196 #if CONFIG_RELOCATABLE
197 /*
198 * Find the address of the relocations.
199 */
200 leal z_output_len(%ebp), %edi
201
202 /*
203 * Calculate the delta between where vmlinux was compiled to run
204 * and where it was actually loaded.
205 */
206 movl %ebp, %ebx
207 subl $LOAD_PHYSICAL_ADDR, %ebx
208 jz 2f /* Nothing to be done if loaded at compiled addr. */
209 /*
210 * Process relocations.
211 */
212
213 1: subl $4, %edi
214 movl (%edi), %ecx
215 testl %ecx, %ecx
216 jz 2f
217 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
218 jmp 1b
219 2:
220 #endif
221
222 /*
223 * Jump to the decompressed kernel.
224 */
225 xorl %ebx, %ebx
226 jmp *%ebp
227
/*
 * Stack and heap for uncompression
 */
	.bss
	.balign 4
boot_heap:
	.fill BOOT_HEAP_SIZE, 1, 0
boot_stack:
	.fill BOOT_STACK_SIZE, 1, 0
boot_stack_end: