x86, vmlinux.lds: unify parainstructions
arch/x86/kernel/vmlinux.lds.S
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation and unification done by Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols unless you know that the symbol
 * value should remain constant even if the kernel image is relocated
 * at run time. Absolute symbols are not relocated. If the symbol value
 * should change when the kernel is relocated, make the symbol section
 * relative and put it inside the section definition.
 */

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif
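
/*
 * Note on the jiffies aliases above: the timekeeping code updates the
 * 64-bit jiffies_64. On 32-bit, aliasing jiffies to the same address
 * makes jiffies read the low 32 bits (x86 is little-endian). On 64-bit,
 * jiffies itself is emitted into the vsyscall area (see the .jiffies
 * section below) and jiffies_64 is aliased to it instead.
 */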
PHDRS {
        text PT_LOAD FLAGS(5);          /* R_E */
        data PT_LOAD FLAGS(7);          /* RWE */
#ifdef CONFIG_X86_64
        user PT_LOAD FLAGS(7);          /* RWE */
        data.init PT_LOAD FLAGS(7);     /* RWE */
#ifdef CONFIG_SMP
        percpu PT_LOAD FLAGS(7);        /* RWE */
#endif
        data.init2 PT_LOAD FLAGS(7);    /* RWE */
#endif
        note PT_NOTE FLAGS(0);          /* ___ */
}
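
/*
 * FLAGS() takes the ELF segment flag bitmask: PF_X = 1, PF_W = 2,
 * PF_R = 4. So FLAGS(5) is a read+execute segment and FLAGS(7) is
 * read+write+execute, matching the R_E/RWE comments; the PT_NOTE
 * segment carries no access flags.
 */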
SECTIONS
{
#ifdef CONFIG_X86_32
        . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
        phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
        . = __START_KERNEL;
        phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif
        /* Text and read-only data */

        /* bootstrapping code */
        .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
                _text = .;
                *(.text.head)
        } :text = 0x9090
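        /*
         * Throughout this script, AT(ADDR(section) - LOAD_OFFSET) sets a
         * section's load (physical) address while ADDR() remains its
         * linked virtual address. The "= 0x9090" output-section fill pads
         * gaps with 0x90 bytes, the x86 NOP opcode, so stray jumps into
         * padding of an executable mapping land on harmless NOPs.
         */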

        /* The rest of the text */
        .text : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_X86_32
                /* not really needed, already page aligned */
                . = ALIGN(PAGE_SIZE);
                *(.text.page_aligned)
#endif
                . = ALIGN(8);
                _stext = .;
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                IRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)
                /* End of text section */
                _etext = .;
        } :text = 0x9090

        NOTES :text :note
        /* Exception table */
        . = ALIGN(16);
        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
                __start___ex_table = .;
                *(__ex_table)
                __stop___ex_table = .;
        } :text = 0x9090
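        /*
         * __ex_table holds address pairs emitted by the exception fixup
         * macros: an instruction that may legitimately fault (e.g. a
         * user-space access) and the fixup code to branch to instead.
         * The table is sorted once at boot and binary-searched from the
         * fault paths, so a match avoids an oops.
         */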

        RODATA
        /* Data */
        . = ALIGN(PAGE_SIZE);
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
                DATA_DATA
                CONSTRUCTORS

#ifdef CONFIG_X86_64
                /* End of data section */
                _edata = .;
#endif
        } :data

#ifdef CONFIG_X86_32
        /* 32 bit has nosave before _edata */
        . = ALIGN(PAGE_SIZE);
        .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
                __nosave_begin = .;
                *(.data.nosave)
                . = ALIGN(PAGE_SIZE);
                __nosave_end = .;
        }
#endif

        . = ALIGN(PAGE_SIZE);
        .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
                *(.data.page_aligned)
                *(.data.idt)
        }
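        /*
         * .data.idt holds the 32-bit kernel's IDT; keeping it in
         * page-aligned data lets the kernel remap the table, notably
         * read-only through a fixmap for the Pentium F00F erratum
         * workaround.
         */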

#ifdef CONFIG_X86_32
        . = ALIGN(32);
#else
        . = ALIGN(PAGE_SIZE);
        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
#endif
        .data.cacheline_aligned :
                AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
                *(.data.cacheline_aligned)
        }

        /* rarely changed data like cpu maps */
#ifdef CONFIG_X86_32
        . = ALIGN(32);
#else
        . = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
#endif
        .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
                *(.data.read_mostly)

#ifdef CONFIG_X86_32
                /* End of data section */
                _edata = .;
#endif
        }
#ifdef CONFIG_X86_64

#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \
                        SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \
                        SIZEOF(.data.read_mostly) + 4095) & ~(4095))

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
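
/*
 * The vsyscall sections are linked at the fixed address VSYSCALL_ADDR
 * (-10 MB, i.e. 0xffffffffff600000) but stored in the image directly
 * after .data.read_mostly, rounded up to a page. For a section placed
 * at VSYSCALL_ADDR + off this works out to:
 *
 *   VLOAD(x) = ADDR(x) - (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
 *            = VSYSCALL_PHYS_ADDR + off     (load address)
 *   VVIRT(x) = VSYSCALL_VIRT_ADDR + off     (kernel-mapping address)
 *
 * which is how the vsyscall_gtod_data etc. aliases below are computed.
 */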

        . = VSYSCALL_ADDR;
        .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) {
                *(.vsyscall_0)
        } :user

        __vsyscall_0 = VSYSCALL_VIRT_ADDR;

        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
        .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
                *(.vsyscall_fn)
        }

        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
        .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
                *(.vsyscall_gtod_data)
        }

        vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
        .vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
                *(.vsyscall_clock)
        }
        vsyscall_clock = VVIRT(.vsyscall_clock);


        .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
                *(.vsyscall_1)
        }
        .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
                *(.vsyscall_2)
        }

        .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
                *(.vgetcpu_mode)
        }
        vgetcpu_mode = VVIRT(.vgetcpu_mode);

        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
        .jiffies : AT(VLOAD(.jiffies)) {
                *(.jiffies)
        }
        jiffies = VVIRT(.jiffies);

        .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
                *(.vsyscall_3)
        }

        . = VSYSCALL_VIRT_ADDR + PAGE_SIZE;

#undef VSYSCALL_ADDR
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

#endif /* CONFIG_X86_64 */
        /* init_task */
        . = ALIGN(THREAD_SIZE);
        .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
                *(.data.init_task)
        }
#ifdef CONFIG_X86_64
         :data.init
#endif

        /*
         * smp_locks might be freed after init
         * start/end must be page aligned
         */
        . = ALIGN(PAGE_SIZE);
        .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
                __smp_locks = .;
                *(.smp_locks)
                __smp_locks_end = .;
                . = ALIGN(PAGE_SIZE);
        }

        /* Init code and data - will be freed after init */
        . = ALIGN(PAGE_SIZE);
        __init_begin = .; /* paired with __init_end */
        .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
                _sinittext = .;
                INIT_TEXT
                _einittext = .;
        }

        .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
                INIT_DATA
        }

        . = ALIGN(16);
        .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
                __setup_start = .;
                *(.init.setup)
                __setup_end = .;
        }
        .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
                __initcall_start = .;
                INITCALLS
                __initcall_end = .;
        }
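        /*
         * INITCALLS (from asm-generic/vmlinux.lds.h) expands to the
         * per-level initcall sections (early, then levels 0-7, with the
         * rootfs initcalls between 5 and 6), so do_initcalls() can walk
         * the function pointers between __initcall_start and
         * __initcall_end in priority order.
         */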

        .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
                __con_initcall_start = .;
                *(.con_initcall.init)
                __con_initcall_end = .;
        }

        .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
                __x86_cpu_dev_start = .;
                *(.x86_cpu_dev.init)
                __x86_cpu_dev_end = .;
        }

        SECURITY_INIT
        . = ALIGN(8);
        .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
                __parainstructions = .;
                *(.parainstructions)
                __parainstructions_end = .;
        }
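        /*
         * This is the hunk the commit subject ("unify parainstructions")
         * refers to: the section moves into the shared script instead of
         * being carried separately by the 32-bit and 64-bit variants. It
         * collects the struct paravirt_patch_site records emitted at
         * every pv-ops call site; at boot, apply_paravirt() walks
         * [__parainstructions, __parainstructions_end) and patches each
         * site for the native or hypervisor backend in use.
         */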

        . = ALIGN(8);
        .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
                __alt_instructions = .;
                *(.altinstructions)
                __alt_instructions_end = .;
        }

        .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
                *(.altinstr_replacement)
        }
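        /*
         * .altinstructions holds struct alt_instr records pairing an
         * original instruction sequence with a replacement kept in
         * .altinstr_replacement. apply_alternatives() patches the
         * originals in place at boot when the associated CPU-feature
         * bit is set.
         */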

#ifdef CONFIG_X86_32
# include "vmlinux_32.lds.S"
#else
# include "vmlinux_64.lds.S"
#endif
        STABS_DEBUG
        DWARF_DEBUG
}

#ifdef CONFIG_X86_32
ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
        "kernel image bigger than KERNEL_IMAGE_SIZE")
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
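
/*
 * On 64-bit the per-cpu variables are zero-based: per_cpu__foo is an
 * offset into a CPU's per-cpu area, not an address. Until the real
 * per-cpu areas are set up, the boot CPU runs on the load-time copy at
 * __per_cpu_load, so these aliases give early boot code absolute
 * addresses for gdt_page and irq_stack_union inside that copy.
 */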

/*
 * Build-time check on the image size:
 */
ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
        "kernel image bigger than KERNEL_IMAGE_SIZE")

#ifdef CONFIG_SMP
ASSERT((per_cpu__irq_stack_union == 0),
        "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
        "kexec control code size is too big")
#endif