1 /*
2 * Helper macros to support writing architecture specific
3 * linker scripts.
4 *
5 * A minimal linker script has the following content:
6 * [This is a sample; architectures may have special requirements]
7 *
8 * OUTPUT_FORMAT(...)
9 * OUTPUT_ARCH(...)
10 * ENTRY(...)
11 * SECTIONS
12 * {
13 * . = START;
14 * __init_begin = .;
15 * HEAD_TEXT_SECTION
16 * INIT_TEXT_SECTION(PAGE_SIZE)
17 * INIT_DATA_SECTION(...)
18 * PERCPU_SECTION(CACHELINE_SIZE)
19 * __init_end = .;
20 *
21 * _stext = .;
22 * TEXT_SECTION = 0
23 * _etext = .;
24 *
25 * _sdata = .;
26 * RO_DATA_SECTION(PAGE_SIZE)
27 * RW_DATA_SECTION(...)
28 * _edata = .;
29 *
30 * EXCEPTION_TABLE(...)
31 * NOTES
32 *
33 * BSS_SECTION(0, 0, 0)
34 * _end = .;
35 *
36 * STABS_DEBUG
37 * DWARF_DEBUG
38 *
39 * DISCARDS // must be the last
40 * }
41 *
42 * [__init_begin, __init_end] is the init section that may be freed after init
43 * // __init_begin and __init_end should be page aligned, so that we can
44 * // free the whole .init memory
45 * [_stext, _etext] is the text section
46 * [_sdata, _edata] is the data section
47 *
48 * Some of the included output sections have their own set of constants.
49 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
50 * [__nosave_begin, __nosave_end] for the nosave data
51 */
52
53 #ifndef LOAD_OFFSET
54 #define LOAD_OFFSET 0
55 #endif
56
57 #include <linux/export.h>
58
59 /* Align . to an 8 byte boundary, which equals the maximum function alignment. */
60 #define ALIGN_FUNCTION() . = ALIGN(8)
61
62 /*
63 * Align to a 32 byte boundary equal to the
64 * alignment gcc 4.5 uses for a struct
65 */
66 #define STRUCT_ALIGNMENT 32
67 #define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
68
69 /* The actual configuration determines whether the init/exit sections
70 * are handled as text/data or can be discarded (which
71 * often happens at runtime)
72 */
73 #ifdef CONFIG_HOTPLUG_CPU
74 #define CPU_KEEP(sec) *(.cpu##sec)
75 #define CPU_DISCARD(sec)
76 #else
77 #define CPU_KEEP(sec)
78 #define CPU_DISCARD(sec) *(.cpu##sec)
79 #endif
80
81 #if defined(CONFIG_MEMORY_HOTPLUG)
82 #define MEM_KEEP(sec) *(.mem##sec)
83 #define MEM_DISCARD(sec)
84 #else
85 #define MEM_KEEP(sec)
86 #define MEM_DISCARD(sec) *(.mem##sec)
87 #endif
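
/*
 * Illustrative sketch (shown only as a comment): with CONFIG_MEMORY_HOTPLUG=y,
 * MEM_KEEP(init.text) expands to the input-section rule
 *
 *	*(.meminit.text)
 *
 * so memory-hotplug related init code is kept in the image, while
 * MEM_DISCARD(init.text) expands to nothing. Without memory hotplug the
 * roles are swapped and the .meminit.* input sections can be discarded
 * along with the rest of the init sections.
 */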
88
89 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
90 #define MCOUNT_REC() . = ALIGN(8); \
91 VMLINUX_SYMBOL(__start_mcount_loc) = .; \
92 *(__mcount_loc) \
93 VMLINUX_SYMBOL(__stop_mcount_loc) = .;
94 #else
95 #define MCOUNT_REC()
96 #endif
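
/*
 * Hedged C-side sketch: the __start_/__stop_ symbols emitted by helpers
 * such as MCOUNT_REC() are consumed from C as array bounds, along the
 * lines of (ftrace-style declarations, shown here only for illustration):
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *
 * ftrace then walks the [__start_mcount_loc, __stop_mcount_loc) range at
 * init time to find every recorded mcount call site.
 */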
97
98 #ifdef CONFIG_TRACE_BRANCH_PROFILING
99 #define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
100 *(_ftrace_annotated_branch) \
101 VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
102 #else
103 #define LIKELY_PROFILE()
104 #endif
105
106 #ifdef CONFIG_PROFILE_ALL_BRANCHES
107 #define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
108 *(_ftrace_branch) \
109 VMLINUX_SYMBOL(__stop_branch_profile) = .;
110 #else
111 #define BRANCH_PROFILE()
112 #endif
113
114 #ifdef CONFIG_KPROBES
115 #define KPROBE_BLACKLIST() . = ALIGN(8); \
116 VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
117 *(_kprobe_blacklist) \
118 VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
119 #else
120 #define KPROBE_BLACKLIST()
121 #endif
122
123 #ifdef CONFIG_EVENT_TRACING
124 #define FTRACE_EVENTS() . = ALIGN(8); \
125 VMLINUX_SYMBOL(__start_ftrace_events) = .; \
126 *(_ftrace_events) \
127 VMLINUX_SYMBOL(__stop_ftrace_events) = .;
128 #else
129 #define FTRACE_EVENTS()
130 #endif
131
132 #ifdef CONFIG_TRACING
133 #define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
134 *(__trace_printk_fmt) /* Trace_printk fmt pointers */ \
135 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
136 #define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
137 *(__tracepoint_str) /* Tracepoint string pointers */ \
138 VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
139 #else
140 #define TRACE_PRINTKS()
141 #define TRACEPOINT_STR()
142 #endif
143
144 #ifdef CONFIG_FTRACE_SYSCALLS
145 #define TRACE_SYSCALLS() . = ALIGN(8); \
146 VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
147 *(__syscalls_metadata) \
148 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
149 #else
150 #define TRACE_SYSCALLS()
151 #endif
152
153
154 #define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
155 #define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
156 #define OF_TABLE(cfg, name) __OF_TABLE(config_enabled(cfg), name)
157 #define _OF_TABLE_0(name)
158 #define _OF_TABLE_1(name) \
159 . = ALIGN(8); \
160 VMLINUX_SYMBOL(__##name##_of_table) = .; \
161 *(__##name##_of_table) \
162 *(__##name##_of_table_end)
163
164 #define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
165 #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
166 #define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
167 #define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
168 #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
169 #define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
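
/*
 * Illustrative expansion: OF_TABLE() selects _OF_TABLE_1 or _OF_TABLE_0
 * via config_enabled(), so with CONFIG_IRQCHIP=y IRQCHIP_OF_MATCH_TABLE()
 * expands roughly to:
 *
 *	. = ALIGN(8);
 *	VMLINUX_SYMBOL(__irqchip_of_table) = .;
 *	*(__irqchip_of_table)
 *	*(__irqchip_of_table_end)
 *
 * and to nothing when the option is disabled.
 */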
170
171 #define KERNEL_DTB() \
172 STRUCT_ALIGN(); \
173 VMLINUX_SYMBOL(__dtb_start) = .; \
174 *(.dtb.init.rodata) \
175 VMLINUX_SYMBOL(__dtb_end) = .;
176
177 /* .data section */
178 #define DATA_DATA \
179 *(.data) \
180 *(.ref.data) \
181 *(.data..shared_aligned) /* percpu related */ \
182 MEM_KEEP(init.data) \
183 MEM_KEEP(exit.data) \
184 *(.data.unlikely) \
185 STRUCT_ALIGN(); \
186 *(__tracepoints) \
187 /* implement dynamic printk debug */ \
188 . = ALIGN(8); \
189 VMLINUX_SYMBOL(__start___jump_table) = .; \
190 *(__jump_table) \
191 VMLINUX_SYMBOL(__stop___jump_table) = .; \
192 . = ALIGN(8); \
193 VMLINUX_SYMBOL(__start___verbose) = .; \
194 *(__verbose) \
195 VMLINUX_SYMBOL(__stop___verbose) = .; \
196 LIKELY_PROFILE() \
197 BRANCH_PROFILE() \
198 TRACE_PRINTKS() \
199 TRACEPOINT_STR()
200
201 /*
202 * Data section helpers
203 */
204 #define NOSAVE_DATA \
205 . = ALIGN(PAGE_SIZE); \
206 VMLINUX_SYMBOL(__nosave_begin) = .; \
207 *(.data..nosave) \
208 . = ALIGN(PAGE_SIZE); \
209 VMLINUX_SYMBOL(__nosave_end) = .;
210
211 #define PAGE_ALIGNED_DATA(page_align) \
212 . = ALIGN(page_align); \
213 *(.data..page_aligned)
214
215 #define READ_MOSTLY_DATA(align) \
216 . = ALIGN(align); \
217 *(.data..read_mostly) \
218 . = ALIGN(align);
219
220 #define CACHELINE_ALIGNED_DATA(align) \
221 . = ALIGN(align); \
222 *(.data..cacheline_aligned)
223
224 #define INIT_TASK_DATA(align) \
225 . = ALIGN(align); \
226 *(.data..init_task)
227
228 /*
229 * Read only Data
230 */
231 #define RO_DATA_SECTION(align) \
232 . = ALIGN((align)); \
233 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
234 VMLINUX_SYMBOL(__start_rodata) = .; \
235 *(.rodata) *(.rodata.*) \
236 *(__vermagic) /* Kernel version magic */ \
237 . = ALIGN(8); \
238 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
239 *(__tracepoints_ptrs) /* Tracepoints: pointer array */\
240 VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
241 *(__tracepoints_strings)/* Tracepoints: strings */ \
242 } \
243 \
244 .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
245 *(.rodata1) \
246 } \
247 \
248 BUG_TABLE \
249 \
250 /* PCI quirks */ \
251 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
252 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
253 *(.pci_fixup_early) \
254 VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
255 VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
256 *(.pci_fixup_header) \
257 VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
258 VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
259 *(.pci_fixup_final) \
260 VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
261 VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
262 *(.pci_fixup_enable) \
263 VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
264 VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
265 *(.pci_fixup_resume) \
266 VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
267 VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
268 *(.pci_fixup_resume_early) \
269 VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
270 VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
271 *(.pci_fixup_suspend) \
272 VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
273 VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
274 *(.pci_fixup_suspend_late) \
275 VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
276 } \
277 \
278 /* Built-in firmware blobs */ \
279 .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
280 VMLINUX_SYMBOL(__start_builtin_fw) = .; \
281 *(.builtin_fw) \
282 VMLINUX_SYMBOL(__end_builtin_fw) = .; \
283 } \
284 \
285 TRACEDATA \
286 \
287 /* Kernel symbol table: Normal symbols */ \
288 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
289 VMLINUX_SYMBOL(__start___ksymtab) = .; \
290 *(SORT(___ksymtab+*)) \
291 VMLINUX_SYMBOL(__stop___ksymtab) = .; \
292 } \
293 \
294 /* Kernel symbol table: GPL-only symbols */ \
295 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
296 VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
297 *(SORT(___ksymtab_gpl+*)) \
298 VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
299 } \
300 \
301 /* Kernel symbol table: Normal unused symbols */ \
302 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
303 VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
304 *(SORT(___ksymtab_unused+*)) \
305 VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
306 } \
307 \
308 /* Kernel symbol table: GPL-only unused symbols */ \
309 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
310 VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
311 *(SORT(___ksymtab_unused_gpl+*)) \
312 VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
313 } \
314 \
315 /* Kernel symbol table: GPL-future-only symbols */ \
316 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
317 VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
318 *(SORT(___ksymtab_gpl_future+*)) \
319 VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
320 } \
321 \
322 /* Kernel symbol table: Normal symbols */ \
323 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
324 VMLINUX_SYMBOL(__start___kcrctab) = .; \
325 *(SORT(___kcrctab+*)) \
326 VMLINUX_SYMBOL(__stop___kcrctab) = .; \
327 } \
328 \
329 /* Kernel symbol table: GPL-only symbols */ \
330 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
331 VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
332 *(SORT(___kcrctab_gpl+*)) \
333 VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
334 } \
335 \
336 /* Kernel symbol table: Normal unused symbols */ \
337 __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
338 VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
339 *(SORT(___kcrctab_unused+*)) \
340 VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
341 } \
342 \
343 /* Kernel symbol table: GPL-only unused symbols */ \
344 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
345 VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
346 *(SORT(___kcrctab_unused_gpl+*)) \
347 VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
348 } \
349 \
350 /* Kernel symbol table: GPL-future-only symbols */ \
351 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
352 VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
353 *(SORT(___kcrctab_gpl_future+*)) \
354 VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
355 } \
356 \
357 /* Kernel symbol table: strings */ \
358 __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
359 *(__ksymtab_strings) \
360 } \
361 \
362 /* __*init sections */ \
363 __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
364 *(.ref.rodata) \
365 MEM_KEEP(init.rodata) \
366 MEM_KEEP(exit.rodata) \
367 } \
368 \
369 /* Built-in module parameters. */ \
370 __param : AT(ADDR(__param) - LOAD_OFFSET) { \
371 VMLINUX_SYMBOL(__start___param) = .; \
372 *(__param) \
373 VMLINUX_SYMBOL(__stop___param) = .; \
374 } \
375 \
376 /* Built-in module versions. */ \
377 __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
378 VMLINUX_SYMBOL(__start___modver) = .; \
379 *(__modver) \
380 VMLINUX_SYMBOL(__stop___modver) = .; \
381 . = ALIGN((align)); \
382 VMLINUX_SYMBOL(__end_rodata) = .; \
383 } \
384 . = ALIGN((align));
385
386 /* RODATA & RO_DATA provided for backward compatibility.
387 * All archs are supposed to use RO_DATA() */
388 #define RODATA RO_DATA_SECTION(4096)
389 #define RO_DATA(align) RO_DATA_SECTION(align)
390
391 #define SECURITY_INIT \
392 .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
393 VMLINUX_SYMBOL(__security_initcall_start) = .; \
394 *(.security_initcall.init) \
395 VMLINUX_SYMBOL(__security_initcall_end) = .; \
396 }
397
398 /* .text section. Map to function alignment to avoid address changes
399 * during the second ld pass when generating System.map */
400 #define TEXT_TEXT \
401 ALIGN_FUNCTION(); \
402 *(.text.hot) \
403 *(.text) \
404 *(.ref.text) \
405 MEM_KEEP(init.text) \
406 MEM_KEEP(exit.text) \
407 *(.text.unlikely)
408
409
410 /* sched.text is aligned to function alignment to ensure we have the same
411 * address even at the second ld pass when generating System.map */
412 #define SCHED_TEXT \
413 ALIGN_FUNCTION(); \
414 VMLINUX_SYMBOL(__sched_text_start) = .; \
415 *(.sched.text) \
416 VMLINUX_SYMBOL(__sched_text_end) = .;
417
418 /* spinlock.text is aligned to function alignment to ensure we have the same
419 * address even at the second ld pass when generating System.map */
420 #define LOCK_TEXT \
421 ALIGN_FUNCTION(); \
422 VMLINUX_SYMBOL(__lock_text_start) = .; \
423 *(.spinlock.text) \
424 VMLINUX_SYMBOL(__lock_text_end) = .;
425
426 #define KPROBES_TEXT \
427 ALIGN_FUNCTION(); \
428 VMLINUX_SYMBOL(__kprobes_text_start) = .; \
429 *(.kprobes.text) \
430 VMLINUX_SYMBOL(__kprobes_text_end) = .;
431
432 #define ENTRY_TEXT \
433 ALIGN_FUNCTION(); \
434 VMLINUX_SYMBOL(__entry_text_start) = .; \
435 *(.entry.text) \
436 VMLINUX_SYMBOL(__entry_text_end) = .;
437
438 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
439 #define IRQENTRY_TEXT \
440 ALIGN_FUNCTION(); \
441 VMLINUX_SYMBOL(__irqentry_text_start) = .; \
442 *(.irqentry.text) \
443 VMLINUX_SYMBOL(__irqentry_text_end) = .;
444 #else
445 #define IRQENTRY_TEXT
446 #endif
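
/*
 * Hedged sketch of how an architecture's vmlinux.lds.S typically composes
 * its .text output section from the helpers above (section names and
 * ordering vary per arch; this is only an illustration):
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 *		_text = .;
 *		HEAD_TEXT
 *		_stext = .;
 *		TEXT_TEXT
 *		SCHED_TEXT
 *		LOCK_TEXT
 *		KPROBES_TEXT
 *		ENTRY_TEXT
 *		IRQENTRY_TEXT
 *		*(.fixup)
 *		*(.gnu.warning)
 *		_etext = .;
 *	}
 */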
447
448 /* Section used for early init (in .S files) */
449 #define HEAD_TEXT *(.head.text)
450
451 #define HEAD_TEXT_SECTION \
452 .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
453 HEAD_TEXT \
454 }
455
456 /*
457 * Exception table
458 */
459 #define EXCEPTION_TABLE(align) \
460 . = ALIGN(align); \
461 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
462 VMLINUX_SYMBOL(__start___ex_table) = .; \
463 *(__ex_table) \
464 VMLINUX_SYMBOL(__stop___ex_table) = .; \
465 }
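
/*
 * Hedged usage sketch: an architecture typically invokes this with the
 * alignment it wants for the table entries, for example:
 *
 *	EXCEPTION_TABLE(16)
 *
 * The resulting [__start___ex_table, __stop___ex_table) range is what the
 * fault fixup code searches at run time (see lib/extable.c).
 */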
466
467 /*
468 * Init task
469 */
470 #define INIT_TASK_DATA_SECTION(align) \
471 . = ALIGN(align); \
472 .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
473 INIT_TASK_DATA(align) \
474 }
475
476 #ifdef CONFIG_CONSTRUCTORS
477 #define KERNEL_CTORS() . = ALIGN(8); \
478 VMLINUX_SYMBOL(__ctors_start) = .; \
479 *(.ctors) \
480 *(.init_array) \
481 VMLINUX_SYMBOL(__ctors_end) = .;
482 #else
483 #define KERNEL_CTORS()
484 #endif
485
486 /* init and exit section handling */
487 #define INIT_DATA \
488 *(.init.data) \
489 MEM_DISCARD(init.data) \
490 KERNEL_CTORS() \
491 MCOUNT_REC() \
492 *(.init.rodata) \
493 FTRACE_EVENTS() \
494 TRACE_SYSCALLS() \
495 KPROBE_BLACKLIST() \
496 MEM_DISCARD(init.rodata) \
497 CLK_OF_TABLES() \
498 RESERVEDMEM_OF_TABLES() \
499 CLKSRC_OF_TABLES() \
500 CPU_METHOD_OF_TABLES() \
501 KERNEL_DTB() \
502 IRQCHIP_OF_MATCH_TABLE() \
503 EARLYCON_OF_TABLES()
504
505 #define INIT_TEXT \
506 *(.init.text) \
507 MEM_DISCARD(init.text)
508
509 #define EXIT_DATA \
510 *(.exit.data) \
511 MEM_DISCARD(exit.data) \
512 MEM_DISCARD(exit.rodata)
513
514 #define EXIT_TEXT \
515 *(.exit.text) \
516 MEM_DISCARD(exit.text)
517
518 #define EXIT_CALL \
519 *(.exitcall.exit)
520
521 /*
522 * bss (Block Started by Symbol) - uninitialized data
523 * zeroed during startup
524 */
525 #define SBSS(sbss_align) \
526 . = ALIGN(sbss_align); \
527 .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
528 *(.sbss) \
529 *(.scommon) \
530 }
531
532 /*
533 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
534 * sections to the front of bss.
535 */
536 #ifndef BSS_FIRST_SECTIONS
537 #define BSS_FIRST_SECTIONS
538 #endif
539
540 #define BSS(bss_align) \
541 . = ALIGN(bss_align); \
542 .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
543 BSS_FIRST_SECTIONS \
544 *(.bss..page_aligned) \
545 *(.dynbss) \
546 *(.bss) \
547 *(COMMON) \
548 }
549
550 /*
551 * DWARF debug sections.
552 * Symbols in the DWARF debugging sections are relative to
553 * the beginning of the section so we begin them at 0.
554 */
555 #define DWARF_DEBUG \
556 /* DWARF 1 */ \
557 .debug 0 : { *(.debug) } \
558 .line 0 : { *(.line) } \
559 /* GNU DWARF 1 extensions */ \
560 .debug_srcinfo 0 : { *(.debug_srcinfo) } \
561 .debug_sfnames 0 : { *(.debug_sfnames) } \
562 /* DWARF 1.1 and DWARF 2 */ \
563 .debug_aranges 0 : { *(.debug_aranges) } \
564 .debug_pubnames 0 : { *(.debug_pubnames) } \
565 /* DWARF 2 */ \
566 .debug_info 0 : { *(.debug_info \
567 .gnu.linkonce.wi.*) } \
568 .debug_abbrev 0 : { *(.debug_abbrev) } \
569 .debug_line 0 : { *(.debug_line) } \
570 .debug_frame 0 : { *(.debug_frame) } \
571 .debug_str 0 : { *(.debug_str) } \
572 .debug_loc 0 : { *(.debug_loc) } \
573 .debug_macinfo 0 : { *(.debug_macinfo) } \
574 /* SGI/MIPS DWARF 2 extensions */ \
575 .debug_weaknames 0 : { *(.debug_weaknames) } \
576 .debug_funcnames 0 : { *(.debug_funcnames) } \
577 .debug_typenames 0 : { *(.debug_typenames) } \
578 .debug_varnames 0 : { *(.debug_varnames) } \
579
580 /* Stabs debugging sections. */
581 #define STABS_DEBUG \
582 .stab 0 : { *(.stab) } \
583 .stabstr 0 : { *(.stabstr) } \
584 .stab.excl 0 : { *(.stab.excl) } \
585 .stab.exclstr 0 : { *(.stab.exclstr) } \
586 .stab.index 0 : { *(.stab.index) } \
587 .stab.indexstr 0 : { *(.stab.indexstr) } \
588 .comment 0 : { *(.comment) }
589
590 #ifdef CONFIG_GENERIC_BUG
591 #define BUG_TABLE \
592 . = ALIGN(8); \
593 __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
594 VMLINUX_SYMBOL(__start___bug_table) = .; \
595 *(__bug_table) \
596 VMLINUX_SYMBOL(__stop___bug_table) = .; \
597 }
598 #else
599 #define BUG_TABLE
600 #endif
601
602 #ifdef CONFIG_PM_TRACE
603 #define TRACEDATA \
604 . = ALIGN(4); \
605 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
606 VMLINUX_SYMBOL(__tracedata_start) = .; \
607 *(.tracedata) \
608 VMLINUX_SYMBOL(__tracedata_end) = .; \
609 }
610 #else
611 #define TRACEDATA
612 #endif
613
614 #define NOTES \
615 .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
616 VMLINUX_SYMBOL(__start_notes) = .; \
617 *(.note.*) \
618 VMLINUX_SYMBOL(__stop_notes) = .; \
619 }
620
621 #define INIT_SETUP(initsetup_align) \
622 . = ALIGN(initsetup_align); \
623 VMLINUX_SYMBOL(__setup_start) = .; \
624 *(.init.setup) \
625 VMLINUX_SYMBOL(__setup_end) = .;
626
627 #define INIT_CALLS_LEVEL(level) \
628 VMLINUX_SYMBOL(__initcall##level##_start) = .; \
629 *(.initcall##level##.init) \
630 *(.initcall##level##s.init) \
631
632 #define INIT_CALLS \
633 VMLINUX_SYMBOL(__initcall_start) = .; \
634 *(.initcallearly.init) \
635 INIT_CALLS_LEVEL(0) \
636 INIT_CALLS_LEVEL(1) \
637 INIT_CALLS_LEVEL(2) \
638 INIT_CALLS_LEVEL(3) \
639 INIT_CALLS_LEVEL(4) \
640 INIT_CALLS_LEVEL(5) \
641 INIT_CALLS_LEVEL(rootfs) \
642 INIT_CALLS_LEVEL(6) \
643 INIT_CALLS_LEVEL(7) \
644 VMLINUX_SYMBOL(__initcall_end) = .;
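
/*
 * Illustrative expansion: INIT_CALLS_LEVEL(4) pulls in both the plain and
 * the sync variant of that initcall level, i.e. roughly:
 *
 *	VMLINUX_SYMBOL(__initcall4_start) = .;
 *	*(.initcall4.init)
 *	*(.initcall4s.init)
 *
 * do_initcalls() later walks [__initcall_start, __initcall_end) level by
 * level using these per-level start symbols.
 */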
645
646 #define CON_INITCALL \
647 VMLINUX_SYMBOL(__con_initcall_start) = .; \
648 *(.con_initcall.init) \
649 VMLINUX_SYMBOL(__con_initcall_end) = .;
650
651 #define SECURITY_INITCALL \
652 VMLINUX_SYMBOL(__security_initcall_start) = .; \
653 *(.security_initcall.init) \
654 VMLINUX_SYMBOL(__security_initcall_end) = .;
655
656 #ifdef CONFIG_BLK_DEV_INITRD
657 #define INIT_RAM_FS \
658 . = ALIGN(4); \
659 VMLINUX_SYMBOL(__initramfs_start) = .; \
660 *(.init.ramfs) \
661 . = ALIGN(8); \
662 *(.init.ramfs.info)
663 #else
664 #define INIT_RAM_FS
665 #endif
666
667 /*
668 * Default discarded sections.
669 *
670 * Some archs want to discard exit text/data at runtime rather than
671 * link time due to cross-section references such as alt instructions,
672 * bug table, eh_frame, etc. DISCARDS must be the last of output
673 * section definitions so that such archs put those in earlier section
674 * definitions.
675 */
676 #define DISCARDS \
677 /DISCARD/ : { \
678 EXIT_TEXT \
679 EXIT_DATA \
680 EXIT_CALL \
681 *(.discard) \
682 *(.discard.*) \
683 }
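
/*
 * Hedged placement sketch: an architecture that cannot discard exit code
 * at link time keeps it by defining its own output section before
 * DISCARDS, e.g. something like:
 *
 *	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
 *		EXIT_TEXT
 *	}
 *
 * so the later /DISCARD/ rule no longer matches those input sections.
 */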
684
685 /**
686 * PERCPU_INPUT - the percpu input sections
687 * @cacheline: cacheline size
688 *
689 * The core percpu section names and core symbols which do not rely
690 * directly upon load addresses.
691 *
692 * @cacheline is used to align subsections to avoid false cacheline
693 * sharing between subsections for different purposes.
694 */
695 #define PERCPU_INPUT(cacheline) \
696 VMLINUX_SYMBOL(__per_cpu_start) = .; \
697 *(.data..percpu..first) \
698 . = ALIGN(PAGE_SIZE); \
699 *(.data..percpu..page_aligned) \
700 . = ALIGN(cacheline); \
701 *(.data..percpu..read_mostly) \
702 . = ALIGN(cacheline); \
703 *(.data..percpu) \
704 *(.data..percpu..shared_aligned) \
705 VMLINUX_SYMBOL(__per_cpu_end) = .;
706
707 /**
708 * PERCPU_VADDR - define output section for percpu area
709 * @cacheline: cacheline size
710 * @vaddr: explicit base address (optional)
711 * @phdr: destination PHDR (optional)
712 *
713 * Macro which expands to output section for percpu area.
714 *
715 * @cacheline is used to align subsections to avoid false cacheline
716 * sharing between subsections for different purposes.
717 *
718 * If @vaddr is not blank, it specifies explicit base address and all
719 * percpu symbols will be offset from the given address. If blank,
720 * @vaddr always equals @laddr + LOAD_OFFSET.
721 *
722 * @phdr defines the output PHDR to use if not blank. Be warned that
723 * output PHDR is sticky. If @phdr is specified, the next output
724 * section in the linker script will go there too. @phdr should have
725 * a leading colon.
726 *
727 * Note that this macro defines __per_cpu_load as an absolute symbol.
728 * If there is no need to put the percpu section at a predetermined
729 * address, use PERCPU_SECTION.
730 */
731 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
732 VMLINUX_SYMBOL(__per_cpu_load) = .; \
733 .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
734 - LOAD_OFFSET) { \
735 PERCPU_INPUT(cacheline) \
736 } phdr \
737 . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
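
/*
 * Hedged usage sketch (loosely modelled on x86_64 SMP; details vary):
 * placing the percpu area at virtual address 0 in its own program header
 * makes per-cpu symbols small offsets from the segment base:
 *
 *	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 *
 * Architectures without such a requirement should use PERCPU_SECTION()
 * below instead.
 */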
738
739 /**
740 * PERCPU_SECTION - define output section for percpu area, simple version
741 * @cacheline: cacheline size
742 *
743 * Aligns to PAGE_SIZE and outputs the output section for the percpu area.
744 * This macro doesn't manipulate @vaddr or @phdr, so __per_cpu_load and
745 * __per_cpu_start will be identical.
746 *
747 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
748 * except that __per_cpu_load is defined as a relative symbol against
749 * .data..percpu which is required for relocatable x86_32 configuration.
750 */
751 #define PERCPU_SECTION(cacheline) \
752 . = ALIGN(PAGE_SIZE); \
753 .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
754 VMLINUX_SYMBOL(__per_cpu_load) = .; \
755 PERCPU_INPUT(cacheline) \
756 }
757
758
759 /*
760 * Definition of the high level *_SECTION macros
761 * They will fit only a subset of the architectures
762 */
763
764
765 /*
766 * Writable data.
767 * All sections are combined in a single .data section.
768 * The sections following CONSTRUCTORS are arranged so their
769 * typical alignment matches.
770 * A cacheline is typically (always) smaller than a PAGE_SIZE, so
771 * the sections that have this restriction (or a similar one)
772 * are located before the ones requiring PAGE_SIZE alignment.
773 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
774 * matches the requirement of PAGE_ALIGNED_DATA.
775 *
776 * Use 0 as page_align if page-aligned data is not used. */
777 #define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
778 . = ALIGN(PAGE_SIZE); \
779 .data : AT(ADDR(.data) - LOAD_OFFSET) { \
780 INIT_TASK_DATA(inittask) \
781 NOSAVE_DATA \
782 PAGE_ALIGNED_DATA(pagealigned) \
783 CACHELINE_ALIGNED_DATA(cacheline) \
784 READ_MOSTLY_DATA(cacheline) \
785 DATA_DATA \
786 CONSTRUCTORS \
787 }
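
/*
 * Hedged usage sketch: an architecture typically passes its cache line
 * size, page size and init-task/stack alignment, for example:
 *
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *
 * Pass 0 as page_align if no page-aligned data is used (see the comment
 * above).
 */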
788
789 #define INIT_TEXT_SECTION(inittext_align) \
790 . = ALIGN(inittext_align); \
791 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
792 VMLINUX_SYMBOL(_sinittext) = .; \
793 INIT_TEXT \
794 VMLINUX_SYMBOL(_einittext) = .; \
795 }
796
797 #define INIT_DATA_SECTION(initsetup_align) \
798 .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
799 INIT_DATA \
800 INIT_SETUP(initsetup_align) \
801 INIT_CALLS \
802 CON_INITCALL \
803 SECURITY_INITCALL \
804 INIT_RAM_FS \
805 }
806
807 #define BSS_SECTION(sbss_align, bss_align, stop_align) \
808 . = ALIGN(sbss_align); \
809 VMLINUX_SYMBOL(__bss_start) = .; \
810 SBSS(sbss_align) \
811 BSS(bss_align) \
812 . = ALIGN(stop_align); \
813 VMLINUX_SYMBOL(__bss_stop) = .;
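
/*
 * Hedged usage sketch: the three arguments give the alignment of .sbss,
 * .bss and of the final __bss_stop symbol respectively; an architecture
 * with no special requirements can simply use
 *
 *	BSS_SECTION(0, 0, 0)
 *
 * as the sample script at the top of this file does.
 */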