Commit | Line | Data |
---|---|---|
5234f5eb EB |
1 | /* |
2 | * machine_kexec.c - handle transition of Linux booting another kernel | |
3 | * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com> | |
4 | * | |
5 | * This source code is licensed under the GNU General Public License, | |
6 | * Version 2. See the file COPYING for more details. | |
7 | */ | |
8 | ||
9 | #include <linux/mm.h> | |
10 | #include <linux/kexec.h> | |
5234f5eb EB |
11 | #include <linux/string.h> |
12 | #include <linux/reboot.h> | |
5234f5eb | 13 | #include <asm/pgtable.h> |
5234f5eb EB |
14 | #include <asm/tlbflush.h> |
15 | #include <asm/mmu_context.h> | |
16 | #include <asm/io.h> | |
8bf27556 | 17 | |
4bfaaef0 MD |
#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
/* Statically allocated, page-aligned transition page tables handed to
 * relocate_kernel() via page_list[] in machine_kexec(): one top-level pgd
 * plus two pud/pmd/pte chains (the "_0" and "_1" sets).  Static so that
 * machine_kexec() itself never has to allocate memory. */
static u64 kexec_pgd[512] PAGE_ALIGNED;
static u64 kexec_pud0[512] PAGE_ALIGNED;
static u64 kexec_pmd0[512] PAGE_ALIGNED;
static u64 kexec_pte0[512] PAGE_ALIGNED;
static u64 kexec_pud1[512] PAGE_ALIGNED;
static u64 kexec_pmd1[512] PAGE_ALIGNED;
static u64 kexec_pte1[512] PAGE_ALIGNED;
26 | ||
8bf27556 | 27 | static void init_level2_page(pmd_t *level2p, unsigned long addr) |
5234f5eb EB |
28 | { |
29 | unsigned long end_addr; | |
72414d3f | 30 | |
5234f5eb | 31 | addr &= PAGE_MASK; |
8bf27556 | 32 | end_addr = addr + PUD_SIZE; |
72414d3f | 33 | while (addr < end_addr) { |
8bf27556 EB |
34 | set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC)); |
35 | addr += PMD_SIZE; | |
5234f5eb EB |
36 | } |
37 | } | |
38 | ||
8bf27556 | 39 | static int init_level3_page(struct kimage *image, pud_t *level3p, |
72414d3f | 40 | unsigned long addr, unsigned long last_addr) |
5234f5eb EB |
41 | { |
42 | unsigned long end_addr; | |
43 | int result; | |
72414d3f | 44 | |
5234f5eb EB |
45 | result = 0; |
46 | addr &= PAGE_MASK; | |
8bf27556 | 47 | end_addr = addr + PGDIR_SIZE; |
72414d3f | 48 | while ((addr < last_addr) && (addr < end_addr)) { |
5234f5eb | 49 | struct page *page; |
8bf27556 | 50 | pmd_t *level2p; |
72414d3f | 51 | |
5234f5eb EB |
52 | page = kimage_alloc_control_pages(image, 0); |
53 | if (!page) { | |
54 | result = -ENOMEM; | |
55 | goto out; | |
56 | } | |
8bf27556 | 57 | level2p = (pmd_t *)page_address(page); |
5234f5eb | 58 | init_level2_page(level2p, addr); |
8bf27556 EB |
59 | set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE)); |
60 | addr += PUD_SIZE; | |
5234f5eb EB |
61 | } |
62 | /* clear the unused entries */ | |
72414d3f | 63 | while (addr < end_addr) { |
8bf27556 EB |
64 | pud_clear(level3p++); |
65 | addr += PUD_SIZE; | |
5234f5eb EB |
66 | } |
67 | out: | |
68 | return result; | |
69 | } | |
70 | ||
71 | ||
8bf27556 | 72 | static int init_level4_page(struct kimage *image, pgd_t *level4p, |
72414d3f | 73 | unsigned long addr, unsigned long last_addr) |
5234f5eb EB |
74 | { |
75 | unsigned long end_addr; | |
76 | int result; | |
72414d3f | 77 | |
5234f5eb EB |
78 | result = 0; |
79 | addr &= PAGE_MASK; | |
8bf27556 | 80 | end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE); |
72414d3f | 81 | while ((addr < last_addr) && (addr < end_addr)) { |
5234f5eb | 82 | struct page *page; |
8bf27556 | 83 | pud_t *level3p; |
72414d3f | 84 | |
5234f5eb EB |
85 | page = kimage_alloc_control_pages(image, 0); |
86 | if (!page) { | |
87 | result = -ENOMEM; | |
88 | goto out; | |
89 | } | |
8bf27556 | 90 | level3p = (pud_t *)page_address(page); |
5234f5eb EB |
91 | result = init_level3_page(image, level3p, addr, last_addr); |
92 | if (result) { | |
93 | goto out; | |
94 | } | |
8bf27556 EB |
95 | set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE)); |
96 | addr += PGDIR_SIZE; | |
5234f5eb EB |
97 | } |
98 | /* clear the unused entries */ | |
72414d3f | 99 | while (addr < end_addr) { |
8bf27556 EB |
100 | pgd_clear(level4p++); |
101 | addr += PGDIR_SIZE; | |
5234f5eb | 102 | } |
72414d3f | 103 | out: |
5234f5eb EB |
104 | return result; |
105 | } | |
106 | ||
107 | ||
108 | static int init_pgtable(struct kimage *image, unsigned long start_pgtable) | |
109 | { | |
8bf27556 EB |
110 | pgd_t *level4p; |
111 | level4p = (pgd_t *)__va(start_pgtable); | |
72414d3f | 112 | return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT); |
5234f5eb EB |
113 | } |
114 | ||
115 | static void set_idt(void *newidt, u16 limit) | |
116 | { | |
36c4fd23 | 117 | struct desc_ptr curidt; |
5234f5eb EB |
118 | |
119 | /* x86-64 supports unaliged loads & stores */ | |
36c4fd23 EB |
120 | curidt.size = limit; |
121 | curidt.address = (unsigned long)newidt; | |
5234f5eb EB |
122 | |
123 | __asm__ __volatile__ ( | |
36c4fd23 EB |
124 | "lidtq %0\n" |
125 | : : "m" (curidt) | |
5234f5eb EB |
126 | ); |
127 | }; | |
128 | ||
129 | ||
130 | static void set_gdt(void *newgdt, u16 limit) | |
131 | { | |
36c4fd23 | 132 | struct desc_ptr curgdt; |
5234f5eb EB |
133 | |
134 | /* x86-64 supports unaligned loads & stores */ | |
36c4fd23 EB |
135 | curgdt.size = limit; |
136 | curgdt.address = (unsigned long)newgdt; | |
5234f5eb EB |
137 | |
138 | __asm__ __volatile__ ( | |
36c4fd23 EB |
139 | "lgdtq %0\n" |
140 | : : "m" (curgdt) | |
5234f5eb EB |
141 | ); |
142 | }; | |
143 | ||
/*
 * Reload every data segment register with __KERNEL_DS so each register's
 * hidden descriptor cache holds a sane value.  Must run before the GDT
 * is zapped by set_gdt() — afterwards the selectors could no longer be
 * reloaded from memory.
 */
static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS) : "memory"
		);
}
155 | ||
5234f5eb EB |
156 | int machine_kexec_prepare(struct kimage *image) |
157 | { | |
4bfaaef0 | 158 | unsigned long start_pgtable; |
5234f5eb EB |
159 | int result; |
160 | ||
161 | /* Calculate the offsets */ | |
72414d3f | 162 | start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT; |
5234f5eb EB |
163 | |
164 | /* Setup the identity mapped 64bit page table */ | |
165 | result = init_pgtable(image, start_pgtable); | |
72414d3f | 166 | if (result) |
5234f5eb | 167 | return result; |
5234f5eb | 168 | |
5234f5eb EB |
169 | return 0; |
170 | } | |
171 | ||
/* Nothing arch-specific to tear down; control pages are freed by the
 * generic kexec code. */
void machine_kexec_cleanup(struct kimage *image)
{
}
176 | ||
/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
NORET_TYPE void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Copy the relocate_kernel trampoline into the second page of the
	 * control allocation so it keeps executing after the old kernel's
	 * mappings are replaced. */
	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, PAGE_SIZE);

	/* Hand relocate_kernel both the physical and virtual address of
	 * the control page and of every static transition page table
	 * (two pud/pmd/pte chains plus the pgd declared above). */
	page_list[PA_CONTROL_PAGE] = __pa(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
	page_list[PA_PGD] = __pa_symbol(&kexec_pgd);
	page_list[VA_PGD] = (unsigned long)kexec_pgd;
	page_list[PA_PUD_0] = __pa_symbol(&kexec_pud0);
	page_list[VA_PUD_0] = (unsigned long)kexec_pud0;
	page_list[PA_PMD_0] = __pa_symbol(&kexec_pmd0);
	page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
	page_list[PA_PTE_0] = __pa_symbol(&kexec_pte0);
	page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
	page_list[PA_PUD_1] = __pa_symbol(&kexec_pud1);
	page_list[VA_PUD_1] = (unsigned long)kexec_pud1;
	page_list[PA_PMD_1] = __pa_symbol(&kexec_pmd1);
	page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
	page_list[PA_PTE_1] = __pa_symbol(&kexec_pte1);
	page_list[VA_PTE_1] = (unsigned long)kexec_pte1;

	/* First page of the control allocation holds the identity-mapped
	 * page table built in machine_kexec_prepare(). */
	page_list[PA_TABLE_PAGE] =
		(unsigned long)__pa(page_address(image->control_code_page));

	/* The segment registers are funny things, they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * with from a table in memory.  At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/* The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0),0);
	set_idt(phys_to_virt(0),0);

	/* now call it — does not return */
	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
			image->start);
}
2c8c0e6b AK |
232 | |
233 | /* crashkernel=size@addr specifies the location to reserve for | |
234 | * a crash kernel. By reserving this memory we guarantee | |
235 | * that linux never set's it up as a DMA target. | |
236 | * Useful for holding code to do something appropriate | |
237 | * after a kernel panic. | |
238 | */ | |
239 | static int __init setup_crashkernel(char *arg) | |
240 | { | |
241 | unsigned long size, base; | |
242 | char *p; | |
243 | if (!arg) | |
244 | return -EINVAL; | |
245 | size = memparse(arg, &p); | |
246 | if (arg == p) | |
247 | return -EINVAL; | |
248 | if (*p == '@') { | |
249 | base = memparse(p+1, &p); | |
250 | /* FIXME: Do I want a sanity check to validate the | |
251 | * memory range? Yes you do, but it's too early for | |
252 | * e820 -AK */ | |
253 | crashk_res.start = base; | |
254 | crashk_res.end = base + size - 1; | |
255 | } | |
256 | return 0; | |
257 | } | |
258 | early_param("crashkernel", setup_crashkernel); | |
259 |