| 1 | /* ld script to make x86-64 Linux kernel |
| 2 | * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>; |
| 3 | */ |
| 4 | |
| 5 | #define LOAD_OFFSET __START_KERNEL_map |
| 6 | |
| 7 | #include <asm-generic/vmlinux.lds.h> |
| 8 | #include <asm/page.h> |
| 9 | |
| 10 | #undef i386 /* in case the preprocessor is a 32bit one */ |
| 11 | |
/* Always emit 64-bit x86 ELF (same format for default/big/little endian). */
OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
/* Entry point is the PHYSICAL address of startup_64, computed in SECTIONS. */
ENTRY(phys_startup_64)
/* jiffies_64 aliases 'jiffies', which is placed in the vsyscall area below. */
jiffies_64 = jiffies;
/* NOTE(review): dummy absolute symbol — presumably satisfies references to
 * the proxy PDA from per-CPU/pda asm; not visible from this file, verify. */
_proxy_pda = 1;
/* ELF program headers (loadable segments).  Sections are assigned to these
 * via the :text / :data / :user / :data.init / :note annotations in
 * SECTIONS.  "user" carries the vsyscall page that is mapped into user
 * space; "note" carries the ELF note sections. */
PHDRS {
	text PT_LOAD FLAGS(5);	/* R_E */
	data PT_LOAD FLAGS(7);	/* RWE */
	user PT_LOAD FLAGS(7);	/* RWE */
	data.init PT_LOAD FLAGS(7);	/* RWE */
	note PT_NOTE FLAGS(4);	/* R__ */
}
SECTIONS
{
  /* Link everything at the kernel's virtual base.  Each section's AT()
   * clause gives its physical load address: virtual - LOAD_OFFSET
   * (LOAD_OFFSET == __START_KERNEL_map, see top of file). */
  . = __START_KERNEL;
  /* Physical address of the 64-bit entry point (used by ENTRY above). */
  phys_startup_64 = startup_64 - LOAD_OFFSET;
  _text = .;			/* Text and read-only data */
  .text : AT(ADDR(.text) - LOAD_OFFSET) {
	/* First the code that has to be first for bootstrapping */
	*(.text.head)
	_stext = .;
	/* Then the rest */
	TEXT_TEXT
	SCHED_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	*(.fixup)
	*(.gnu.warning)
	} :text = 0x9090	/* pad gaps with NOP NOP */
  /* out-of-line lock text */
  .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }

  _etext = .;			/* End of text section */

  . = ALIGN(16);		/* Exception table */
  __start___ex_table = .;
  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
  __stop___ex_table = .;

  NOTES :text :note

  BUG_TABLE :text

  RODATA

  /* Markers consumed by the TRACE_RESUME machinery (suspend/resume debug). */
  . = ALIGN(4);
  .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {
	__tracedata_start = .;
	*(.tracedata)
	__tracedata_end = .;
  }

  . = ALIGN(PAGE_SIZE);		/* Align data segment to page size boundary */
  /* Data */
  .data : AT(ADDR(.data) - LOAD_OFFSET) {
	DATA_DATA
	CONSTRUCTORS
	} :data

  _edata = .;			/* End of data section */

  . = ALIGN(PAGE_SIZE);
  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
	*(.data.cacheline_aligned)
  }
  . = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
	*(.data.read_mostly)
  }

  /* The vsyscall page is LINKED at its fixed user-visible address
   * (VSYSCALL_ADDR = -10MB) but LOADED physically right after
   * .data.read_mostly (page-aligned, hence the +4095 & ~4095 rounding).
   * VLOAD(x) converts a vsyscall section's link address to its load
   * (physical) address; VVIRT(x) converts it to the ordinary kernel
   * virtual address, used for symbols the kernel reads directly. */
#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)

  . = VSYSCALL_ADDR;
  .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user
  /* Kernel-virtual alias of the start of the vsyscall page. */
  __vsyscall_0 = VSYSCALL_VIRT_ADDR;

  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) { *(.vsyscall_fn) }
  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data))
		{ *(.vsyscall_gtod_data) }
  vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
  .vsyscall_clock : AT(VLOAD(.vsyscall_clock))
		{ *(.vsyscall_clock) }
  vsyscall_clock = VVIRT(.vsyscall_clock);


  /* vsyscall entry points sit at fixed 1K offsets within the page —
   * they are part of the user-space ABI and must not move. */
  .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1))
		{ *(.vsyscall_1) }
  .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2))
		{ *(.vsyscall_2) }

  .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) { *(.vgetcpu_mode) }
  vgetcpu_mode = VVIRT(.vgetcpu_mode);

  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .jiffies : AT(VLOAD(.jiffies)) { *(.jiffies) }
  jiffies = VVIRT(.jiffies);

  .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3))
		{ *(.vsyscall_3) }

  /* Resume the normal kernel-virtual layout one page past the
   * vsyscall page's load position. */
  . = VSYSCALL_VIRT_ADDR + 4096;

#undef VSYSCALL_ADDR
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

  . = ALIGN(8192);		/* init_task */
  .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
	*(.data.init_task)
  }:data.init

  . = ALIGN(4096);
  .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
	*(.data.page_aligned)
  }

  /* might get freed after init */
  . = ALIGN(4096);
  __smp_alt_begin = .;
  __smp_locks = .;
  .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
	*(.smp_locks)
  }
  __smp_locks_end = .;
  . = ALIGN(4096);
  __smp_alt_end = .;

  /* Everything from __init_begin to __init_end is freed after boot. */
  . = ALIGN(4096);		/* Init code and data */
  __init_begin = .;
  .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
	_sinittext = .;
	*(.init.text)
	_einittext = .;
  }
  __initdata_begin = .;
  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) }
  __initdata_end = .;
  . = ALIGN(16);
  /* Table of __setup() kernel command-line parameter handlers. */
  __setup_start = .;
  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) }
  __setup_end = .;
  /* Initcall function pointers, run in level order at boot. */
  __initcall_start = .;
  .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
	INITCALLS
  }
  __initcall_end = .;
  __con_initcall_start = .;
  .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
	*(.con_initcall.init)
  }
  __con_initcall_end = .;
  SECURITY_INIT
  /* Alternative-instruction patch tables (applied at boot, then freed). */
  . = ALIGN(8);
  __alt_instructions = .;
  .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
	*(.altinstructions)
  }
  __alt_instructions_end = .;
  .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
	*(.altinstr_replacement)
  }
  /* .exit.text is discard at runtime, not link time, to deal with references
     from .altinstructions and .eh_frame */
  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }

/* vdso blob that is mapped into user space */
  vdso_start = . ;
  .vdso  : AT(ADDR(.vdso) - LOAD_OFFSET) { *(.vdso) }
  . = ALIGN(4096);
  vdso_end = .;

#ifdef CONFIG_BLK_DEV_INITRD
  . = ALIGN(4096);
  __initramfs_start = .;
  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
  __initramfs_end = .;
#endif

  /* Page-aligned per-CPU data template. */
  PERCPU(4096)

  . = ALIGN(4096);
  __init_end = .;

  /* Data excluded from the suspend-to-disk image. */
  . = ALIGN(4096);
  __nosave_begin = .;
  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
  . = ALIGN(4096);
  __nosave_end = .;

  __bss_start = .;		/* BSS */
  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
	*(.bss.page_aligned)
	*(.bss)
	}
  __bss_stop = .;

  _end = . ;

  /* Sections to be discarded */
  /DISCARD/ : {
	*(.exitcall.exit)
	*(.eh_frame)
	}

  STABS_DEBUG

  DWARF_DEBUG
}